blob: bc2a052e518b37518eaf9e128ce418d195bd91e1 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
47#include <asm/uaccess.h>
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg);
56static void hci_notify(struct hci_dev *hdev, int event);
57
58static DEFINE_RWLOCK(hci_task_lock);
59
60/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
68/* HCI protocols */
69#define HCI_MAX_PROTO 2
70struct hci_proto *hci_proto[HCI_MAX_PROTO];
71
72/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a callback on the HCI event notifier chain (HCI_DEV_UP,
 * HCI_DEV_DOWN, ...).  Returns the notifier-chain result code. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a callback previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast a device event to every registered notifier, passing the
 * affected hdev as the notifier data. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request: store the HCI status code and
 * wake the waiter sleeping in __hci_request().  No-op when no request
 * is pending. */
void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
104
/* Abort a pending synchronous request with the given errno (positive,
 * e.g. ENODEV from hci_dev_do_close()) and wake the waiter.  The waiter
 * negates req_result before returning it. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115
/* Execute request and wait for completion.
 *
 * Marks a request pending, invokes @req (which queues HCI commands),
 * then sleeps up to @timeout jiffies until hci_req_complete() or
 * hci_req_cancel() wakes us.  Caller must hold hdev->req_lock (see
 * hci_request()).  Returns 0, a negated HCI/cancel error, -ETIMEDOUT,
 * or -EINTR on signal.
 *
 * NOTE(review): on the -EINTR path req_status is left at HCI_REQ_PEND
 * rather than being reset like the other exits — confirm intended. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* firing the request so a
	 * completion arriving immediately is not missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte to a negative errno */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
158
/* Serialized wrapper around __hci_request(): refuses requests while the
 * device is down and takes req_lock so only one synchronous request
 * runs at a time. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
174
/* Request callback: issue an HCI_Reset to the controller.  @opt is
 * unused (logged only). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
182
/* Request callback: run the controller bring-up sequence.  Sends any
 * pre-formed packets the driver queued on hdev->driver_init, then the
 * mandatory reads (features, version, buffer size, address, ...) and
 * optional setup (event filter, timeouts).  Runs under HCI_INIT from
 * hci_dev_open(); completion is signalled by the event handler. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: driver-supplied packets go out first, as raw
	 * HCI commands through the command queue. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset — skipped for controllers that declare the quirk */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}
256
/* Request callback: write the scan-enable mode (inquiry/page scan bits)
 * carried in the low byte of @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
266
/* Request callback: enable/disable authentication per the low byte of
 * @opt (HCISETAUTH ioctl path). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
276
/* Request callback: set the encryption mode per the low byte of @opt
 * (HCISETENCRYPT ioctl path). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
286
/* Request callback: set the default link policy; @opt is converted to a
 * little-endian 16-bit policy bitmask (HCISETLINKPOL ioctl path). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
296
/* Get HCI device by index.
 * Device is held on return (caller must hci_dev_put()); NULL if the
 * index is negative or no such device is registered. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			/* Take a reference while the list lock pins d */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320
321/* ---- Inquiry support ---- */
/* Free every entry in the device's inquiry cache (singly linked list).
 * Caller must hold the device lock. */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}
335
/* Linear search of the inquiry cache for an entry matching @bdaddr.
 * Returns the entry or NULL.  Caller must hold the device lock. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}
348
/* Insert or refresh an inquiry result in the cache: existing entries
 * for the same bdaddr are overwritten, new ones are prepended.  Both
 * the entry and cache timestamps are bumped.  GFP_ATOMIC because this
 * runs from event-processing context. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}
368
/* Serialize up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info (the wire/ioctl format).  Returns the number of
 * entries copied.  Must not sleep — caller holds the device lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
390
/* Request callback: start an inquiry using the parameters passed via
 * @opt (a pointer to struct hci_inquiry_req).  Skipped if an inquiry
 * is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
407
/* HCIINQUIRY ioctl: run an inquiry (unless the cache is fresh) and copy
 * the cached responses back to user space after the request header.
 * Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	/* Re-inquire when the cache is stale or empty, or the caller
	 * explicitly requested a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ~2 seconds of budget per requested inquiry-length unit */
	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated header, then the response array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
467
468/* ---- HCI ioctl helpers ---- */
469
/* Bring an HCI device up: open the driver transport and, unless the
 * device is marked raw, run the init request sequence.  Returns 0 or a
 * negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an error from
 * __hci_request()). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a radio that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* One command credit until Read Buffer Size completes */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
545
/* Take an HCI device down: cancel pending requests, kill the tasklets,
 * flush caches/connections/queues, issue a final reset, and close the
 * driver transport.  Shared by hci_dev_close(), the rfkill hook, and
 * hci_unregister_dev().  Returns 0 (also when already down). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
608
/* HCIDEVDOWN ioctl: look up the device by index and shut it down. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
620
/* HCIDEVRESET ioctl: flush queues, inquiry cache and connections, then
 * issue an HCI reset — without taking the interface down.  TX is
 * disabled for the duration so nothing races the flush. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control counters to their initial state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
660
/* HCIDEVRESTAT ioctl: zero the device's byte/packet/error counters. */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
675
/* Dispatch the HCISET* device-configuration ioctls.  Copies the request
 * from user space, looks up the target device, and either runs the
 * matching HCI request or updates host-side state directly.  Returns 0
 * or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two halfwords: mtu and packet count.
		 * NOTE(review): the +1/+0 halfword picking depends on the
		 * host's byte order for __u32 — confirm against the
		 * userspace encoding. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
749
/* HCIGETDEVLIST ioctl: copy up to dev_num (id, flags) pairs for the
 * registered devices back to user space.  Returns 0 or a negative
 * errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to about two pages of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
790
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot of the
 * device and copy it to user space.  Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble carries the bus, high nibble the controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
825
826/* ---- Interface to HCI drivers ---- */
827
/* rfkill callback: when the radio is blocked, shut the device down.
 * Unblocking is a no-op here — the device is not powered back up
 * automatically. */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
841
/* rfkill operations: only the block/unblock hook is provided. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
845
/* Alloc HCI device */
/* Allocate a zeroed struct hci_dev and initialize the driver_init
 * queue.  Returns NULL on allocation failure; the caller registers it
 * with hci_register_dev() and frees it with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
860
/* Free HCI device */
/* Drop any leftover driver-init packets and release the embedded
 * device; the struct itself is freed by the device release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
870
/* Register HCI device */
/* Assign the first free hciN id, initialize queues/tasklets/locks and
 * per-device state, create the workqueue, and hook up sysfs and rfkill.
 * Returns the new id, -EINVAL if mandatory driver callbacks are
 * missing, or -ENOMEM if the workqueue cannot be created. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id (list is kept sorted by id) */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
961
/* Unregister HCI device */
/* Remove the device from the global list, shut it down, drop partial
 * reassembly buffers, and release rfkill, sysfs, the workqueue, and
 * the reference held since registration.  Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
994
/* Suspend HCI device: broadcast HCI_DEV_SUSPEND to registered
 * notification listeners. Always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1002
/* Resume HCI device: broadcast HCI_DEV_RESUME to registered
 * notification listeners. Always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1010
/* Receive frame from HCI drivers.
 *
 * The driver stores its hci_dev pointer in skb->dev. Frames are only
 * accepted while the device is up or initializing; otherwise the skb is
 * freed and -ENXIO returned. Accepted frames are timestamped, queued on
 * the RX queue and handed to the RX tasklet. Returns 0 on success.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
			&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1034
/* Incrementally reassemble one HCI packet from a byte stream.
 *
 * @hdev:     device owning the reassembly slots
 * @type:     packet type (HCI_ACLDATA_PKT, HCI_SCODATA_PKT or HCI_EVENT_PKT)
 * @data:     next chunk of bytes from the driver
 * @count:    number of bytes in @data
 * @index:    which hdev->reassembly[] slot to use
 * @gfp_mask: allocation flags for a freshly started packet
 *
 * A partially assembled packet lives in hdev->reassembly[index]; its
 * per-skb control block tracks how many bytes are still expected
 * (first the header, then the payload length parsed from that header).
 * A completed packet is pushed into hci_recv_frame().
 *
 * Returns the number of input bytes left unconsumed (>= 0), -EILSEQ for
 * a bad type/index, or -ENOMEM on allocation failure or if the payload
 * would not fit the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new packet: allocate the maximum frame size for
		 * this type up front and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than is still expected for this packet */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the payload
		 * length from it and verify it fits the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1143
Marcel Holtmannef222012007-07-11 06:42:04 +02001144int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1145{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301146 int rem = 0;
1147
Marcel Holtmannef222012007-07-11 06:42:04 +02001148 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1149 return -EILSEQ;
1150
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001151 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301152 rem = hci_reassembly(hdev, type, data, count,
1153 type - 1, GFP_ATOMIC);
1154 if (rem < 0)
1155 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001156
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301157 data += (count - rem);
1158 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001159 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001160
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301161 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001162}
1163EXPORT_SYMBOL(hci_recv_fragment);
1164
Suraj Sumangala99811512010-07-14 13:02:19 +05301165#define STREAM_REASSEMBLY 0
1166
1167int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1168{
1169 int type;
1170 int rem = 0;
1171
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001172 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301173 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1174
1175 if (!skb) {
1176 struct { char type; } *pkt;
1177
1178 /* Start of the frame */
1179 pkt = data;
1180 type = pkt->type;
1181
1182 data++;
1183 count--;
1184 } else
1185 type = bt_cb(skb)->pkt_type;
1186
1187 rem = hci_reassembly(hdev, type, data,
1188 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1189 if (rem < 0)
1190 return rem;
1191
1192 data += (count - rem);
1193 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001194 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301195
1196 return rem;
1197}
1198EXPORT_SYMBOL(hci_recv_stream_fragment);
1199
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */

/* Register an upper-layer protocol (L2CAP/SCO) in the hci_proto table.
 * Returns 0 on success, -EINVAL for an out-of-range id, -EEXIST if the
 * slot is already taken. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* Exclude the RX/TX tasks while the table changes */
	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1225
/* Remove an upper-layer protocol from the hci_proto table.
 * Returns 0 on success, -EINVAL for an out-of-range id, -ENOENT if the
 * slot was already empty. */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* Exclude the RX/TX tasks while the table changes */
	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1247
/* Add an HCI callback set to the global hci_cb_list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1259
/* Remove an HCI callback set from the global hci_cb_list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1271
/* Hand one outgoing frame to the driver.
 *
 * skb->dev must hold the hci_dev (set by the queueing paths); if it is
 * missing the skb is freed and -ENODEV returned. In promiscuous mode a
 * timestamped copy goes to the monitoring sockets first. Returns the
 * driver's send() result.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Copy to raw HCI sockets for sniffers */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1295
/* Send HCI command.
 *
 * Builds an skb with a command header (little-endian @opcode, @plen)
 * followed by @plen bytes of @param, queues it on the command queue and
 * kicks the command tasklet. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes of the last command kept in
 * hdev->sent_cmd, or NULL if there is none or its opcode does not match
 * @opcode. The pointer aliases hdev->sent_cmd's data; it stays valid
 * only as long as that skb does.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Compare in wire (little-endian) byte order */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1346
/* Send ACL data */

/* Prepend an ACL header (handle+flags packed little-endian, then the
 * payload length) in front of the skb's current contents. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1359
/* Queue an ACL packet (possibly pre-fragmented via skb frag_list) on a
 * connection's data queue and kick the TX tasklet.
 *
 * The head skb gets an ACL_START header; each fragment from the
 * frag_list gets an ACL_CONT header. Fragments are moved onto the
 * connection queue atomically under the queue's own lock so the TX task
 * never sees a partial packet.
 */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each piece is queued separately */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1404
/* Send SCO data */

/* Prepend a SCO header (little-endian handle, payload length), queue
 * the packet on the connection's data queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1427
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */

/* Pick the connection of @type with pending data and the fewest
 * in-flight packets, and compute its fair-share quota from the
 * controller's free buffer count. Returns the chosen connection (or
 * NULL) and writes the quota (at least 1 when a connection is found,
 * else 0) through @quote.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only schedule fully established connections */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the available controller buffers evenly among the
		 * eligible connections; guarantee progress with at least 1. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1468
/* ACL transmit timeout: the controller stopped returning buffer credits.
 * Disconnect every ACL link that still has unacknowledged packets
 * (reason 0x13: remote user terminated connection). */
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
1487
/* Schedule ACL traffic: detect stalled controllers, then repeatedly pick
 * the least-busy connection with queued data and send up to its quota
 * while controller buffer credits (acl_cnt) remain. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed, one packet in flight */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1517
/* Schedule SCO */

/* Drain queued SCO packets fairly across SCO connections while
 * controller SCO buffer credits (sco_cnt) remain. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of saturating */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1538
/* Drain queued eSCO packets fairly across eSCO connections; shares the
 * SCO buffer credit pool (sco_cnt) with hci_sched_sco(). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the in-flight counter instead of saturating */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1558
/* TX tasklet body: run all per-type schedulers, then flush raw
 * (unknown-type) packets straight to the driver. hci_task_lock is taken
 * for reading so protocol (un)registration is excluded while running. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1582
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */

/* Deliver one inbound ACL data packet: strip the ACL header, resolve
 * the connection by handle and pass the payload to the L2CAP protocol
 * handler. Frees the skb itself only when no handler consumed it. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The wire handle field packs the 12-bit handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol; it takes ownership of the skb */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1623
/* SCO data packet */

/* Deliver one inbound SCO data packet: strip the SCO header, resolve
 * the connection by handle and pass the payload to the SCO protocol
 * handler. Frees the skb itself only when no handler consumed it. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol; it takes ownership of the skb */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1658
/* RX tasklet body: drain the device RX queue and dispatch each frame by
 * packet type. Promiscuous listeners get a copy first; in HCI_RAW mode
 * everything else is dropped, and during HCI_INIT only events (needed
 * to complete initialization) are processed. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1713
/* Command tasklet body: send the next queued HCI command when a command
 * credit is available. A clone of the sent command is kept in
 * hdev->sent_cmd for hci_sent_cmd_data(); if cloning fails the command
 * is requeued and the tasklet rescheduled. A stuck controller (no
 * credit for over a second) gets its credit count forcibly reset. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		/* Drop the previously saved command (kfree_skb handles NULL) */
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: put the command back and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}