/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
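/* Callers serialize on hdev->req_lock: hci_request() below takes it, and
 * hci_dev_open()/hci_dev_do_close()/hci_dev_reset() hold it around their
 * __hci_request() calls, so at most one request is in flight per device. */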
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = __cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = __cpu_to_le16(0xffff);
		cp.sco_max_pkt = __cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	{
		struct hci_cp_set_event_flt cp;
		cp.flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, sizeof(cp), &cp);
	}

	/* Page timeout ~20 secs */
	param = __cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = __cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
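/* The inquiry cache is a singly linked list of inquiry_entry records rooted
 * at hdev->inq_cache; entries for a known bdaddr are refreshed in place. The
 * flush and dump helpers below are called with the device lock held (see
 * hci_inquiry()). */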
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next     = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

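/* Copy at most num cached entries into buf as struct inquiry_info records and
 * return how many were written. This never sleeps, so hci_inquiry() can call
 * it with the device lock held and copy the buffer to user space afterwards. */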
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

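/* Request callback for hci_inquiry(): issue an HCI Inquiry with the LAP,
 * length and response limit from the caller's hci_inquiry_req, unless an
 * inquiry is already in progress. */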
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

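/* Bring the device down: cancel any pending request, kill the RX/TX tasklets,
 * flush the inquiry cache and connection hash, reset the controller (unless
 * running raw), then kill the command tasklet, drop all queues and call the
 * driver's close(). Called from hci_dev_close() and hci_unregister_dev(). */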
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *)&dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *)&dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

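/* Hand one complete frame to the driver. In promiscuous mode a timestamped
 * copy goes to the HCI sockets first; the skb is orphaned so driver queueing
 * is not charged to the sending socket. Consumes the skb. */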
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
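/* Build a command packet (header plus parameters) and queue it on
 * hdev->cmd_q; the command tasklet transmits it once the controller's
 * command credit (hdev->cmd_cnt) allows. */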
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s Can't allocate memory for HCI command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = __cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != __cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE);
	hdr->handle = __cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = __cpu_to_le16(len);

	skb->h.raw = (void *) hdr;
}

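/* Queue an ACL frame on the connection's data queue. The head skb is tagged
 * ACL_START; if it carries a frag_list, every fragment is tagged ACL_CONT and
 * the whole chain is queued atomically so the TX task never interleaves
 * another frame in the middle of it. */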
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = __cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE);
	memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
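/* Pick the connection of the given type with the fewest packets outstanding
 * on the controller and grant it a proportional share of the free buffer
 * slots (always at least one), so a busy link cannot starve the others. */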
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

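/* ACL transmit timeout: the controller has stopped returning completed-packet
 * credits, so forcibly disconnect (reason 0x13) every ACL link that still has
 * unacknowledged data in flight. */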
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

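/* RX tasklet: drain hdev->rx_q, mirroring every frame to promiscuous HCI
 * sockets, dropping everything in raw mode, ignoring data packets while the
 * device is still initializing, and otherwise dispatching by packet type. */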
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

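/* Command tasklet: hdev->cmd_cnt holds the number of commands the controller
 * will currently accept. It is decremented when a command is handed to the
 * driver and, if the credit has been exhausted for over a second since the
 * last transmit, reset to 1 so a wedged controller cannot block the queue
 * forever. A clone of the outgoing command is kept in hdev->sent_cmd for
 * hci_sent_cmd_data(). */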
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}