/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#ifndef CONFIG_BT_HCI_CORE_DEBUG
#undef  BT_DBG
#define BT_DBG(D...)
#endif

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

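/* The synchronous request machinery: __hci_request() marks req_status
 * HCI_REQ_PEND, lets req() queue one or more HCI commands, then sleeps on
 * req_wait_q. hci_req_complete() and hci_req_cancel() above record the
 * outcome and wake the sleeper; if neither runs before the timeout
 * expires, the request fails with -ETIMEDOUT. */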
118/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900119static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700120 unsigned long opt, __u32 timeout)
121{
122 DECLARE_WAITQUEUE(wait, current);
123 int err = 0;
124
125 BT_DBG("%s start", hdev->name);
126
127 hdev->req_status = HCI_REQ_PEND;
128
129 add_wait_queue(&hdev->req_wait_q, &wait);
130 set_current_state(TASK_INTERRUPTIBLE);
131
132 req(hdev, opt);
133 schedule_timeout(timeout);
134
135 remove_wait_queue(&hdev->req_wait_q, &wait);
136
137 if (signal_pending(current))
138 return -EINTR;
139
140 switch (hdev->req_status) {
141 case HCI_REQ_DONE:
142 err = -bt_err(hdev->req_result);
143 break;
144
145 case HCI_REQ_CANCELED:
146 err = -hdev->req_result;
147 break;
148
149 default:
150 err = -ETIMEDOUT;
151 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153
154 hdev->req_status = hdev->req_result = 0;
155
156 BT_DBG("%s end: err %d", hdev->name, err);
157
158 return err;
159}
160
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);
}

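/* hci_init_req() drives basic controller bring-up: it first flushes any
 * driver-queued setup commands, optionally resets the controller
 * (HCI_QUIRK_RESET_ON_INIT), then reads local features, version, buffer
 * sizes, BD address and voice setting, clears the event filter, and sets
 * the page and connection-accept timeouts (both roughly 20 seconds). */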
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (test_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks))
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, OGF_HOST_CTL, OCF_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, OGF_INFO_PARAM, OCF_READ_BD_ADDR, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, OGF_HOST_CTL, OCF_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, OGF_LINK_CTL, OCF_INQUIRY, sizeof(cp), &cp);
}

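/* hci_inquiry() backs the HCIINQUIRY ioctl. Cached results are reused
 * unless they are older than INQUIRY_CACHE_AGE_MAX, the cache is empty, or
 * userspace asked for a flush (IREQ_CACHE_FLUSH); otherwise a fresh inquiry
 * runs via hci_request() before the cache is copied out. */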
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so allocate a temporary buffer and copy it
	 * to user space afterwards. */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

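/* hci_dev_open() serializes against other requests with hci_req_lock(),
 * calls the driver's open() method, and, unless the device is marked
 * HCI_RAW, runs hci_init_req() with HCI_INIT set. If initialization fails,
 * every tasklet is killed and the device is closed again before returning. */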
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETLINKPOL:
		hdev->link_policy = (__u16) dr.dev_opt;
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) & (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kmalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	hci_unregister_sysfs(hdev);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

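/* Reassembly keeps one partially received skb per packet type: index 0 for
 * ACL (type 2), 1 for SCO (type 3), 2 for events (type 4), hence the
 * "(type) - 2" above. scb->expect counts the bytes still missing; once it
 * reaches zero the completed frame is handed to hci_recv_frame(). */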
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

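/* hci_send_cmd() only builds and queues the command skb; the actual
 * transmission, the one-outstanding-command flow control via cmd_cnt, and
 * the sent_cmd bookkeeping all happen in hci_cmd_task() below. */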
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 ogf, __u16 ocf, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s ogf 0x%x ocf 0x%x plen %d", hdev->name, ogf, ocf, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(hci_opcode_pack(ogf, ocf));
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 ogf, __u16 ocf)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(hci_opcode_pack(ogf, ocf)))
		return NULL;

	BT_DBG("%s ogf 0x%x ocf 0x%x", hdev->name, ogf, ocf);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

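/* Fragmented ACL skbs arrive with their continuation buffers chained on
 * skb_shinfo(skb)->frag_list. The head fragment is tagged ACL_START, the
 * rest ACL_CONT, and the whole chain is queued on conn->data_q under the
 * queue lock so the TX task never sees a half-queued frame. */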
int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

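/* The connection scheduler below picks, among the connections of the given
 * link type that have queued data, the one with the fewest packets already
 * in flight, and grants it a quota of (controller credits / ready
 * connections) packets, so a busy link cannot starve the others. */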
/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || c->state != BT_CONNECTED
				|| skb_queue_empty(&c->data_q))
			continue;
		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

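/* ACL TX watchdog: if the controller has returned no ACL credits for 45
 * seconds (longer than the worst-case link supervision timeout), every
 * connection that still has unacknowledged packets is forcibly
 * disconnected with status 0x13. */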
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && (jiffies - hdev->acl_last_tx) > (HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

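/* hci_rx_task() drains rx_q in tasklet context: promiscuous listeners get a
 * copy first, raw devices swallow everything, and while HCI_INIT is set
 * only event packets are processed so stray data cannot confuse
 * initialization. */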
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

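/* hci_cmd_task() enforces the HCI rule of one outstanding command: cmd_cnt
 * gates transmission, a clone of the command is kept in sent_cmd for
 * hci_sent_cmd_data(), and a controller that stays silent for more than a
 * second has its credit forcibly restored so the queue cannot wedge. */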
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && (jiffies - hdev->cmd_last_tx) > HZ) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		if (hdev->sent_cmd)
			kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}
1537}