blob: 477c4a60a0790ec9b7ad4c7d38e2b4291a9faf5d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
47#include <asm/uaccess.h>
48#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
Linus Torvalds1da177e2005-04-16 15:20:36 -070053static void hci_cmd_task(unsigned long arg);
54static void hci_rx_task(unsigned long arg);
55static void hci_tx_task(unsigned long arg);
56static void hci_notify(struct hci_dev *hdev, int event);
57
58static DEFINE_RWLOCK(hci_task_lock);
59
60/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
68/* HCI protocols */
69#define HCI_MAX_PROTO 2
70struct hci_proto *hci_proto[HCI_MAX_PROTO];
71
72/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
Alan Sterne041c682006-03-27 01:16:30 -080079 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
Marcel Holtmann65164552005-10-28 19:20:48 +020087static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070088{
Alan Sterne041c682006-03-27 01:16:30 -080089 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
92/* ---- HCI requests ---- */
93
94void hci_req_complete(struct hci_dev *hdev, int result)
95{
96 BT_DBG("%s result 0x%2.2x", hdev->name, result);
97
98 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result;
100 hdev->req_status = HCI_REQ_DONE;
101 wake_up_interruptible(&hdev->req_wait_q);
102 }
103}
104
105static void hci_req_cancel(struct hci_dev *hdev, int err)
106{
107 BT_DBG("%s err 0x%2.2x", hdev->name, err);
108
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = err;
111 hdev->req_status = HCI_REQ_CANCELED;
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114}
115
116/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900117static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118 unsigned long opt, __u32 timeout)
119{
120 DECLARE_WAITQUEUE(wait, current);
121 int err = 0;
122
123 BT_DBG("%s start", hdev->name);
124
125 hdev->req_status = HCI_REQ_PEND;
126
127 add_wait_queue(&hdev->req_wait_q, &wait);
128 set_current_state(TASK_INTERRUPTIBLE);
129
130 req(hdev, opt);
131 schedule_timeout(timeout);
132
133 remove_wait_queue(&hdev->req_wait_q, &wait);
134
135 if (signal_pending(current))
136 return -EINTR;
137
138 switch (hdev->req_status) {
139 case HCI_REQ_DONE:
140 err = -bt_err(hdev->req_result);
141 break;
142
143 case HCI_REQ_CANCELED:
144 err = -hdev->req_result;
145 break;
146
147 default:
148 err = -ETIMEDOUT;
149 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700150 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151
152 hdev->req_status = hdev->req_result = 0;
153
154 BT_DBG("%s end: err %d", hdev->name, err);
155
156 return err;
157}
158
159static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout)
161{
162 int ret;
163
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200164 if (!test_bit(HCI_UP, &hdev->flags))
165 return -ENETDOWN;
166
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167 /* Serialize all requests */
168 hci_req_lock(hdev);
169 ret = __hci_request(hdev, req, opt, timeout);
170 hci_req_unlock(hdev);
171
172 return ret;
173}
174
175static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
176{
177 BT_DBG("%s %ld", hdev->name, opt);
178
179 /* Reset device */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181}
182
183static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
184{
185 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800186 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200187 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188
189 BT_DBG("%s %ld", hdev->name, opt);
190
191 /* Driver initialization */
192
193 /* Special commands */
194 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700195 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100199 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200 }
201 skb_queue_purge(&hdev->driver_init);
202
203 /* Mandatory initialization */
204
205 /* Reset */
Marcel Holtmann7a9d4022008-11-30 12:17:26 +0100206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
209 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200212 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
218#if 0
219 /* Host buffer size */
220 {
221 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700222 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700224 cp.acl_max_pkt = cpu_to_le16(0xffff);
225 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 }
228#endif
229
230 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
232
233 /* Read Class of Device */
234 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
235
236 /* Read Local Name */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238
239 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200240 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241
242 /* Optional initialization */
243
244 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200245 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200246 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
248 /* Page timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700249 param = cpu_to_le16(0x8000);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200250 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251
252 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700253 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255}
256
257static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
258{
259 __u8 scan = opt;
260
261 BT_DBG("%s %x", hdev->name, scan);
262
263 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200264 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700265}
266
267static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
268{
269 __u8 auth = opt;
270
271 BT_DBG("%s %x", hdev->name, auth);
272
273 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200274 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275}
276
277static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
278{
279 __u8 encrypt = opt;
280
281 BT_DBG("%s %x", hdev->name, encrypt);
282
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200283 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200284 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285}
286
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200287static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
288{
289 __le16 policy = cpu_to_le16(opt);
290
Marcel Holtmanna418b892008-11-30 12:17:28 +0100291 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200292
293 /* Default link policy */
294 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
295}
296
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900297/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298 * Device is held on return. */
299struct hci_dev *hci_dev_get(int index)
300{
301 struct hci_dev *hdev = NULL;
302 struct list_head *p;
303
304 BT_DBG("%d", index);
305
306 if (index < 0)
307 return NULL;
308
309 read_lock(&hci_dev_list_lock);
310 list_for_each(p, &hci_dev_list) {
311 struct hci_dev *d = list_entry(p, struct hci_dev, list);
312 if (d->id == index) {
313 hdev = hci_dev_hold(d);
314 break;
315 }
316 }
317 read_unlock(&hci_dev_list_lock);
318 return hdev;
319}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320
321/* ---- Inquiry support ---- */
322static void inquiry_cache_flush(struct hci_dev *hdev)
323{
324 struct inquiry_cache *cache = &hdev->inq_cache;
325 struct inquiry_entry *next = cache->list, *e;
326
327 BT_DBG("cache %p", cache);
328
329 cache->list = NULL;
330 while ((e = next)) {
331 next = e->next;
332 kfree(e);
333 }
334}
335
336struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
337{
338 struct inquiry_cache *cache = &hdev->inq_cache;
339 struct inquiry_entry *e;
340
341 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
342
343 for (e = cache->list; e; e = e->next)
344 if (!bacmp(&e->data.bdaddr, bdaddr))
345 break;
346 return e;
347}
348
349void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
350{
351 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e;
353
354 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
355
356 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
357 /* Entry not in the cache. Add new one. */
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200358 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 e->next = cache->list;
361 cache->list = e;
362 }
363
364 memcpy(&e->data, data, sizeof(*data));
365 e->timestamp = jiffies;
366 cache->timestamp = jiffies;
367}
368
369static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
370{
371 struct inquiry_cache *cache = &hdev->inq_cache;
372 struct inquiry_info *info = (struct inquiry_info *) buf;
373 struct inquiry_entry *e;
374 int copied = 0;
375
376 for (e = cache->list; e && copied < num; e = e->next, copied++) {
377 struct inquiry_data *data = &e->data;
378 bacpy(&info->bdaddr, &data->bdaddr);
379 info->pscan_rep_mode = data->pscan_rep_mode;
380 info->pscan_period_mode = data->pscan_period_mode;
381 info->pscan_mode = data->pscan_mode;
382 memcpy(info->dev_class, data->dev_class, 3);
383 info->clock_offset = data->clock_offset;
384 info++;
385 }
386
387 BT_DBG("cache %p, copied %d", cache, copied);
388 return copied;
389}
390
391static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
392{
393 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
394 struct hci_cp_inquiry cp;
395
396 BT_DBG("%s", hdev->name);
397
398 if (test_bit(HCI_INQUIRY, &hdev->flags))
399 return;
400
401 /* Start Inquiry */
402 memcpy(&cp.lap, &ir->lap, 3);
403 cp.length = ir->length;
404 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200405 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406}
407
408int hci_inquiry(void __user *arg)
409{
410 __u8 __user *ptr = arg;
411 struct hci_inquiry_req ir;
412 struct hci_dev *hdev;
413 int err = 0, do_inquiry = 0, max_rsp;
414 long timeo;
415 __u8 *buf;
416
417 if (copy_from_user(&ir, ptr, sizeof(ir)))
418 return -EFAULT;
419
420 if (!(hdev = hci_dev_get(ir.dev_id)))
421 return -ENODEV;
422
423 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900424 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 inquiry_cache_empty(hdev) ||
426 ir.flags & IREQ_CACHE_FLUSH) {
427 inquiry_cache_flush(hdev);
428 do_inquiry = 1;
429 }
430 hci_dev_unlock_bh(hdev);
431
Marcel Holtmann04837f62006-07-03 10:02:33 +0200432 timeo = ir.length * msecs_to_jiffies(2000);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
434 goto done;
435
436 /* for unlimited number of responses we will use buffer with 255 entries */
437 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
438
439 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
440 * copy it to the user space.
441 */
442 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
443 err = -ENOMEM;
444 goto done;
445 }
446
447 hci_dev_lock_bh(hdev);
448 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
449 hci_dev_unlock_bh(hdev);
450
451 BT_DBG("num_rsp %d", ir.num_rsp);
452
453 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
454 ptr += sizeof(ir);
455 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
456 ir.num_rsp))
457 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900458 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700459 err = -EFAULT;
460
461 kfree(buf);
462
463done:
464 hci_dev_put(hdev);
465 return err;
466}
467
468/* ---- HCI ioctl helpers ---- */
469
470int hci_dev_open(__u16 dev)
471{
472 struct hci_dev *hdev;
473 int ret = 0;
474
475 if (!(hdev = hci_dev_get(dev)))
476 return -ENODEV;
477
478 BT_DBG("%s %p", hdev->name, hdev);
479
480 hci_req_lock(hdev);
481
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200482 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
483 ret = -ERFKILL;
484 goto done;
485 }
486
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 if (test_bit(HCI_UP, &hdev->flags)) {
488 ret = -EALREADY;
489 goto done;
490 }
491
492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
493 set_bit(HCI_RAW, &hdev->flags);
494
Marcel Holtmann943da252010-02-13 02:28:41 +0100495 /* Treat all non BR/EDR controllers as raw devices for now */
496 if (hdev->dev_type != HCI_BREDR)
497 set_bit(HCI_RAW, &hdev->flags);
498
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499 if (hdev->open(hdev)) {
500 ret = -EIO;
501 goto done;
502 }
503
504 if (!test_bit(HCI_RAW, &hdev->flags)) {
505 atomic_set(&hdev->cmd_cnt, 1);
506 set_bit(HCI_INIT, &hdev->flags);
507
508 //__hci_request(hdev, hci_reset_req, 0, HZ);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200509 ret = __hci_request(hdev, hci_init_req, 0,
510 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511
512 clear_bit(HCI_INIT, &hdev->flags);
513 }
514
515 if (!ret) {
516 hci_dev_hold(hdev);
517 set_bit(HCI_UP, &hdev->flags);
518 hci_notify(hdev, HCI_DEV_UP);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900519 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 /* Init failed, cleanup */
521 tasklet_kill(&hdev->rx_task);
522 tasklet_kill(&hdev->tx_task);
523 tasklet_kill(&hdev->cmd_task);
524
525 skb_queue_purge(&hdev->cmd_q);
526 skb_queue_purge(&hdev->rx_q);
527
528 if (hdev->flush)
529 hdev->flush(hdev);
530
531 if (hdev->sent_cmd) {
532 kfree_skb(hdev->sent_cmd);
533 hdev->sent_cmd = NULL;
534 }
535
536 hdev->close(hdev);
537 hdev->flags = 0;
538 }
539
540done:
541 hci_req_unlock(hdev);
542 hci_dev_put(hdev);
543 return ret;
544}
545
546static int hci_dev_do_close(struct hci_dev *hdev)
547{
548 BT_DBG("%s %p", hdev->name, hdev);
549
550 hci_req_cancel(hdev, ENODEV);
551 hci_req_lock(hdev);
552
553 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
554 hci_req_unlock(hdev);
555 return 0;
556 }
557
558 /* Kill RX and TX tasks */
559 tasklet_kill(&hdev->rx_task);
560 tasklet_kill(&hdev->tx_task);
561
562 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev);
Johan Hedbergf0358562010-05-18 13:20:32 +0200565 hci_blacklist_clear(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566 hci_dev_unlock_bh(hdev);
567
568 hci_notify(hdev, HCI_DEV_DOWN);
569
570 if (hdev->flush)
571 hdev->flush(hdev);
572
573 /* Reset device */
574 skb_queue_purge(&hdev->cmd_q);
575 atomic_set(&hdev->cmd_cnt, 1);
576 if (!test_bit(HCI_RAW, &hdev->flags)) {
577 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200578 __hci_request(hdev, hci_reset_req, 0,
579 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 clear_bit(HCI_INIT, &hdev->flags);
581 }
582
583 /* Kill cmd task */
584 tasklet_kill(&hdev->cmd_task);
585
586 /* Drop queues */
587 skb_queue_purge(&hdev->rx_q);
588 skb_queue_purge(&hdev->cmd_q);
589 skb_queue_purge(&hdev->raw_q);
590
591 /* Drop last sent command */
592 if (hdev->sent_cmd) {
593 kfree_skb(hdev->sent_cmd);
594 hdev->sent_cmd = NULL;
595 }
596
597 /* After this point our queues are empty
598 * and no tasks are scheduled. */
599 hdev->close(hdev);
600
601 /* Clear flags */
602 hdev->flags = 0;
603
604 hci_req_unlock(hdev);
605
606 hci_dev_put(hdev);
607 return 0;
608}
609
610int hci_dev_close(__u16 dev)
611{
612 struct hci_dev *hdev;
613 int err;
614
615 if (!(hdev = hci_dev_get(dev)))
616 return -ENODEV;
617 err = hci_dev_do_close(hdev);
618 hci_dev_put(hdev);
619 return err;
620}
621
622int hci_dev_reset(__u16 dev)
623{
624 struct hci_dev *hdev;
625 int ret = 0;
626
627 if (!(hdev = hci_dev_get(dev)))
628 return -ENODEV;
629
630 hci_req_lock(hdev);
631 tasklet_disable(&hdev->tx_task);
632
633 if (!test_bit(HCI_UP, &hdev->flags))
634 goto done;
635
636 /* Drop queues */
637 skb_queue_purge(&hdev->rx_q);
638 skb_queue_purge(&hdev->cmd_q);
639
640 hci_dev_lock_bh(hdev);
641 inquiry_cache_flush(hdev);
642 hci_conn_hash_flush(hdev);
643 hci_dev_unlock_bh(hdev);
644
645 if (hdev->flush)
646 hdev->flush(hdev);
647
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900648 atomic_set(&hdev->cmd_cnt, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
650
651 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200652 ret = __hci_request(hdev, hci_reset_req, 0,
653 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654
655done:
656 tasklet_enable(&hdev->tx_task);
657 hci_req_unlock(hdev);
658 hci_dev_put(hdev);
659 return ret;
660}
661
662int hci_dev_reset_stat(__u16 dev)
663{
664 struct hci_dev *hdev;
665 int ret = 0;
666
667 if (!(hdev = hci_dev_get(dev)))
668 return -ENODEV;
669
670 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
671
672 hci_dev_put(hdev);
673
674 return ret;
675}
676
677int hci_dev_cmd(unsigned int cmd, void __user *arg)
678{
679 struct hci_dev *hdev;
680 struct hci_dev_req dr;
681 int err = 0;
682
683 if (copy_from_user(&dr, arg, sizeof(dr)))
684 return -EFAULT;
685
686 if (!(hdev = hci_dev_get(dr.dev_id)))
687 return -ENODEV;
688
689 switch (cmd) {
690 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200691 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
692 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693 break;
694
695 case HCISETENCRYPT:
696 if (!lmp_encrypt_capable(hdev)) {
697 err = -EOPNOTSUPP;
698 break;
699 }
700
701 if (!test_bit(HCI_AUTH, &hdev->flags)) {
702 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200703 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
704 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 if (err)
706 break;
707 }
708
Marcel Holtmann04837f62006-07-03 10:02:33 +0200709 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
710 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 break;
712
713 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200714 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
715 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 break;
717
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200718 case HCISETLINKPOL:
719 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
720 msecs_to_jiffies(HCI_INIT_TIMEOUT));
721 break;
722
723 case HCISETLINKMODE:
724 hdev->link_mode = ((__u16) dr.dev_opt) &
725 (HCI_LM_MASTER | HCI_LM_ACCEPT);
726 break;
727
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 case HCISETPTYPE:
729 hdev->pkt_type = (__u16) dr.dev_opt;
730 break;
731
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200733 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
734 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 break;
736
737 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200738 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
739 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700740 break;
741
742 default:
743 err = -EINVAL;
744 break;
745 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200746
Linus Torvalds1da177e2005-04-16 15:20:36 -0700747 hci_dev_put(hdev);
748 return err;
749}
750
751int hci_get_dev_list(void __user *arg)
752{
753 struct hci_dev_list_req *dl;
754 struct hci_dev_req *dr;
755 struct list_head *p;
756 int n = 0, size, err;
757 __u16 dev_num;
758
759 if (get_user(dev_num, (__u16 __user *) arg))
760 return -EFAULT;
761
762 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
763 return -EINVAL;
764
765 size = sizeof(*dl) + dev_num * sizeof(*dr);
766
Vegard Nossumc6bf5142008-11-30 12:17:19 +0100767 if (!(dl = kzalloc(size, GFP_KERNEL)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 return -ENOMEM;
769
770 dr = dl->dev_req;
771
772 read_lock_bh(&hci_dev_list_lock);
773 list_for_each(p, &hci_dev_list) {
774 struct hci_dev *hdev;
775 hdev = list_entry(p, struct hci_dev, list);
776 (dr + n)->dev_id = hdev->id;
777 (dr + n)->dev_opt = hdev->flags;
778 if (++n >= dev_num)
779 break;
780 }
781 read_unlock_bh(&hci_dev_list_lock);
782
783 dl->dev_num = n;
784 size = sizeof(*dl) + n * sizeof(*dr);
785
786 err = copy_to_user(arg, dl, size);
787 kfree(dl);
788
789 return err ? -EFAULT : 0;
790}
791
792int hci_get_dev_info(void __user *arg)
793{
794 struct hci_dev *hdev;
795 struct hci_dev_info di;
796 int err = 0;
797
798 if (copy_from_user(&di, arg, sizeof(di)))
799 return -EFAULT;
800
801 if (!(hdev = hci_dev_get(di.dev_id)))
802 return -ENODEV;
803
804 strcpy(di.name, hdev->name);
805 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +0100806 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 di.flags = hdev->flags;
808 di.pkt_type = hdev->pkt_type;
809 di.acl_mtu = hdev->acl_mtu;
810 di.acl_pkts = hdev->acl_pkts;
811 di.sco_mtu = hdev->sco_mtu;
812 di.sco_pkts = hdev->sco_pkts;
813 di.link_policy = hdev->link_policy;
814 di.link_mode = hdev->link_mode;
815
816 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
817 memcpy(&di.features, &hdev->features, sizeof(di.features));
818
819 if (copy_to_user(arg, &di, sizeof(di)))
820 err = -EFAULT;
821
822 hci_dev_put(hdev);
823
824 return err;
825}
826
827/* ---- Interface to HCI drivers ---- */
828
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200829static int hci_rfkill_set_block(void *data, bool blocked)
830{
831 struct hci_dev *hdev = data;
832
833 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
834
835 if (!blocked)
836 return 0;
837
838 hci_dev_do_close(hdev);
839
840 return 0;
841}
842
843static const struct rfkill_ops hci_rfkill_ops = {
844 .set_block = hci_rfkill_set_block,
845};
846
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847/* Alloc HCI device */
848struct hci_dev *hci_alloc_dev(void)
849{
850 struct hci_dev *hdev;
851
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200852 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 if (!hdev)
854 return NULL;
855
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 skb_queue_head_init(&hdev->driver_init);
857
858 return hdev;
859}
860EXPORT_SYMBOL(hci_alloc_dev);
861
862/* Free HCI device */
863void hci_free_dev(struct hci_dev *hdev)
864{
865 skb_queue_purge(&hdev->driver_init);
866
Marcel Holtmanna91f2e32006-07-03 10:02:41 +0200867 /* will free via device release */
868 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869}
870EXPORT_SYMBOL(hci_free_dev);
871
872/* Register HCI device */
873int hci_register_dev(struct hci_dev *hdev)
874{
875 struct list_head *head = &hci_dev_list, *p;
Marcel Holtmannef222012007-07-11 06:42:04 +0200876 int i, id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877
Marcel Holtmannc13854c2010-02-08 15:27:07 +0100878 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
879 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880
881 if (!hdev->open || !hdev->close || !hdev->destruct)
882 return -EINVAL;
883
884 write_lock_bh(&hci_dev_list_lock);
885
886 /* Find first available device id */
887 list_for_each(p, &hci_dev_list) {
888 if (list_entry(p, struct hci_dev, list)->id != id)
889 break;
890 head = p; id++;
891 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900892
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 sprintf(hdev->name, "hci%d", id);
894 hdev->id = id;
895 list_add(&hdev->list, head);
896
897 atomic_set(&hdev->refcnt, 1);
898 spin_lock_init(&hdev->lock);
899
900 hdev->flags = 0;
901 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f9902007-07-11 09:51:55 +0200902 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 hdev->link_mode = (HCI_LM_ACCEPT);
904
Marcel Holtmann04837f62006-07-03 10:02:33 +0200905 hdev->idle_timeout = 0;
906 hdev->sniff_max_interval = 800;
907 hdev->sniff_min_interval = 80;
908
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
910 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
911 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
912
913 skb_queue_head_init(&hdev->rx_q);
914 skb_queue_head_init(&hdev->cmd_q);
915 skb_queue_head_init(&hdev->raw_q);
916
Suraj Sumangalacd4c5392010-07-14 13:02:16 +0530917 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +0200918 hdev->reassembly[i] = NULL;
919
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +0000921 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922
923 inquiry_cache_init(hdev);
924
925 hci_conn_hash_init(hdev);
926
Johan Hedbergf0358562010-05-18 13:20:32 +0200927 INIT_LIST_HEAD(&hdev->blacklist.list);
928
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
930
931 atomic_set(&hdev->promisc, 0);
932
933 write_unlock_bh(&hci_dev_list_lock);
934
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +0100935 hdev->workqueue = create_singlethread_workqueue(hdev->name);
936 if (!hdev->workqueue)
937 goto nomem;
938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939 hci_register_sysfs(hdev);
940
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200941 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
942 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
943 if (hdev->rfkill) {
944 if (rfkill_register(hdev->rfkill) < 0) {
945 rfkill_destroy(hdev->rfkill);
946 hdev->rfkill = NULL;
947 }
948 }
949
Linus Torvalds1da177e2005-04-16 15:20:36 -0700950 hci_notify(hdev, HCI_DEV_REG);
951
952 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +0100953
954nomem:
955 write_lock_bh(&hci_dev_list_lock);
956 list_del(&hdev->list);
957 write_unlock_bh(&hci_dev_list_lock);
958
959 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960}
961EXPORT_SYMBOL(hci_register_dev);
962
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink from the global device list first so no new users find us,
	 * then close the device. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets left in the per-type slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	/* Tear down the rfkill switch if one was registered */
	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Drop the registration reference; the device is freed when the
	 * last reference goes away. */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
995
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only broadcasts HCI_DEV_SUSPEND to registered notifiers;
	 * no device state is changed here. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1003
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only broadcasts HCI_DEV_RESUME to registered notifiers;
	 * no device state is changed here. */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1011
Marcel Holtmann76bca882009-11-18 00:40:39 +01001012/* Receive frame from HCI drivers */
1013int hci_recv_frame(struct sk_buff *skb)
1014{
1015 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1016 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1017 && !test_bit(HCI_INIT, &hdev->flags))) {
1018 kfree_skb(skb);
1019 return -ENXIO;
1020 }
1021
1022 /* Incomming skb */
1023 bt_cb(skb)->incoming = 1;
1024
1025 /* Time stamp */
1026 __net_timestamp(skb);
1027
1028 /* Queue frame for rx task */
1029 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001030 tasklet_schedule(&hdev->rx_task);
1031
Marcel Holtmann76bca882009-11-18 00:40:39 +01001032 return 0;
1033}
1034EXPORT_SYMBOL(hci_recv_frame);
1035
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301036static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1037 int count, __u8 index, gfp_t gfp_mask)
1038{
1039 int len = 0;
1040 int hlen = 0;
1041 int remain = count;
1042 struct sk_buff *skb;
1043 struct bt_skb_cb *scb;
1044
1045 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1046 index >= NUM_REASSEMBLY)
1047 return -EILSEQ;
1048
1049 skb = hdev->reassembly[index];
1050
1051 if (!skb) {
1052 switch (type) {
1053 case HCI_ACLDATA_PKT:
1054 len = HCI_MAX_FRAME_SIZE;
1055 hlen = HCI_ACL_HDR_SIZE;
1056 break;
1057 case HCI_EVENT_PKT:
1058 len = HCI_MAX_EVENT_SIZE;
1059 hlen = HCI_EVENT_HDR_SIZE;
1060 break;
1061 case HCI_SCODATA_PKT:
1062 len = HCI_MAX_SCO_SIZE;
1063 hlen = HCI_SCO_HDR_SIZE;
1064 break;
1065 }
1066
1067 skb = bt_skb_alloc(len, gfp_mask);
1068 if (!skb)
1069 return -ENOMEM;
1070
1071 scb = (void *) skb->cb;
1072 scb->expect = hlen;
1073 scb->pkt_type = type;
1074
1075 skb->dev = (void *) hdev;
1076 hdev->reassembly[index] = skb;
1077 }
1078
1079 while (count) {
1080 scb = (void *) skb->cb;
1081 len = min(scb->expect, (__u16)count);
1082
1083 memcpy(skb_put(skb, len), data, len);
1084
1085 count -= len;
1086 data += len;
1087 scb->expect -= len;
1088 remain = count;
1089
1090 switch (type) {
1091 case HCI_EVENT_PKT:
1092 if (skb->len == HCI_EVENT_HDR_SIZE) {
1093 struct hci_event_hdr *h = hci_event_hdr(skb);
1094 scb->expect = h->plen;
1095
1096 if (skb_tailroom(skb) < scb->expect) {
1097 kfree_skb(skb);
1098 hdev->reassembly[index] = NULL;
1099 return -ENOMEM;
1100 }
1101 }
1102 break;
1103
1104 case HCI_ACLDATA_PKT:
1105 if (skb->len == HCI_ACL_HDR_SIZE) {
1106 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1107 scb->expect = __le16_to_cpu(h->dlen);
1108
1109 if (skb_tailroom(skb) < scb->expect) {
1110 kfree_skb(skb);
1111 hdev->reassembly[index] = NULL;
1112 return -ENOMEM;
1113 }
1114 }
1115 break;
1116
1117 case HCI_SCODATA_PKT:
1118 if (skb->len == HCI_SCO_HDR_SIZE) {
1119 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1120 scb->expect = h->dlen;
1121
1122 if (skb_tailroom(skb) < scb->expect) {
1123 kfree_skb(skb);
1124 hdev->reassembly[index] = NULL;
1125 return -ENOMEM;
1126 }
1127 }
1128 break;
1129 }
1130
1131 if (scb->expect == 0) {
1132 /* Complete frame */
1133
1134 bt_cb(skb)->pkt_type = type;
1135 hci_recv_frame(skb);
1136
1137 hdev->reassembly[index] = NULL;
1138 return remain;
1139 }
1140 }
1141
1142 return remain;
1143}
1144
Marcel Holtmannef222012007-07-11 06:42:04 +02001145/* Receive packet type fragment */
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301146#define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 1])
Marcel Holtmannef222012007-07-11 06:42:04 +02001147
1148int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1149{
1150 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1151 return -EILSEQ;
1152
1153 while (count) {
1154 struct sk_buff *skb = __reassembly(hdev, type);
1155 struct { int expect; } *scb;
1156 int len = 0;
1157
1158 if (!skb) {
1159 /* Start of the frame */
1160
1161 switch (type) {
1162 case HCI_EVENT_PKT:
1163 if (count >= HCI_EVENT_HDR_SIZE) {
1164 struct hci_event_hdr *h = data;
1165 len = HCI_EVENT_HDR_SIZE + h->plen;
1166 } else
1167 return -EILSEQ;
1168 break;
1169
1170 case HCI_ACLDATA_PKT:
1171 if (count >= HCI_ACL_HDR_SIZE) {
1172 struct hci_acl_hdr *h = data;
1173 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
1174 } else
1175 return -EILSEQ;
1176 break;
1177
1178 case HCI_SCODATA_PKT:
1179 if (count >= HCI_SCO_HDR_SIZE) {
1180 struct hci_sco_hdr *h = data;
1181 len = HCI_SCO_HDR_SIZE + h->dlen;
1182 } else
1183 return -EILSEQ;
1184 break;
1185 }
1186
1187 skb = bt_skb_alloc(len, GFP_ATOMIC);
1188 if (!skb) {
1189 BT_ERR("%s no memory for packet", hdev->name);
1190 return -ENOMEM;
1191 }
1192
1193 skb->dev = (void *) hdev;
1194 bt_cb(skb)->pkt_type = type;
YOSHIFUJI Hideaki00ae02f2007-07-19 10:43:16 +09001195
Marcel Holtmannef222012007-07-11 06:42:04 +02001196 __reassembly(hdev, type) = skb;
1197
1198 scb = (void *) skb->cb;
1199 scb->expect = len;
1200 } else {
1201 /* Continuation */
1202
1203 scb = (void *) skb->cb;
1204 len = scb->expect;
1205 }
1206
1207 len = min(len, count);
1208
1209 memcpy(skb_put(skb, len), data, len);
1210
1211 scb->expect -= len;
1212
1213 if (scb->expect == 0) {
1214 /* Complete frame */
1215
1216 __reassembly(hdev, type) = NULL;
1217
1218 bt_cb(skb)->pkt_type = type;
1219 hci_recv_frame(skb);
1220 }
1221
1222 count -= len; data += len;
1223 }
1224
1225 return 0;
1226}
1227EXPORT_SYMBOL(hci_recv_fragment);
1228
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229/* ---- Interface to upper protocols ---- */
1230
1231/* Register/Unregister protocols.
1232 * hci_task_lock is used to ensure that no tasks are running. */
1233int hci_register_proto(struct hci_proto *hp)
1234{
1235 int err = 0;
1236
1237 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1238
1239 if (hp->id >= HCI_MAX_PROTO)
1240 return -EINVAL;
1241
1242 write_lock_bh(&hci_task_lock);
1243
1244 if (!hci_proto[hp->id])
1245 hci_proto[hp->id] = hp;
1246 else
1247 err = -EEXIST;
1248
1249 write_unlock_bh(&hci_task_lock);
1250
1251 return err;
1252}
1253EXPORT_SYMBOL(hci_register_proto);
1254
1255int hci_unregister_proto(struct hci_proto *hp)
1256{
1257 int err = 0;
1258
1259 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1260
1261 if (hp->id >= HCI_MAX_PROTO)
1262 return -EINVAL;
1263
1264 write_lock_bh(&hci_task_lock);
1265
1266 if (hci_proto[hp->id])
1267 hci_proto[hp->id] = NULL;
1268 else
1269 err = -ENOENT;
1270
1271 write_unlock_bh(&hci_task_lock);
1272
1273 return err;
1274}
1275EXPORT_SYMBOL(hci_unregister_proto);
1276
/* Add a callback entry to the global hci_cb_list. Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1288
/* Remove a callback entry from the global hci_cb_list. Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1300
/* Hand one frame to the driver's send callback, mirroring it to
 * monitoring sockets first when the device is in promiscuous use.
 * Returns the driver's result, or -ENODEV if the skb has no device. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp before the copy goes to the sockets */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1324
/* Send HCI command
 *
 * Builds a command packet (header + optional parameters copied from
 * @param) and queues it on cmd_q for the cmd task to send.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	/* NOTE(review): hdr->plen is narrower than the __u32 argument, so
	 * large values are truncated — callers are presumably bounded by
	 * the HCI parameter-length limit; verify at call sites. */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Queue for the cmd task; actual transmission is paced there */
	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
1358/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001359void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360{
1361 struct hci_command_hdr *hdr;
1362
1363 if (!hdev->sent_cmd)
1364 return NULL;
1365
1366 hdr = (void *) hdev->sent_cmd->data;
1367
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001368 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 return NULL;
1370
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001371 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1374}
1375
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length, captured before the push below */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and flags are packed into one 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1388
/* Queue an ACL frame (possibly pre-fragmented via skb frag_list) on the
 * connection's data queue and kick the tx task.  The head skb gets the
 * ACL_START boundary flag, every frag_list entry gets ACL_CONT. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1433
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* dlen is taken before the header is pushed, so it counts payload only */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and let the tx task send it */
	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1456
1457/* ---- HCI TX task (outgoing data) ---- */
1458
/* HCI Connection scheduler
 *
 * Picks the connection of @type with queued data and the fewest packets
 * in flight, and computes a fair per-round quota (*quote) by dividing
 * the available controller credits among all eligible connections.
 * Returns NULL (and *quote = 0) when nothing is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Skip wrong-type connections and those with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the credits evenly; guarantee at least one */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1497
1498static inline void hci_acl_tx_to(struct hci_dev *hdev)
1499{
1500 struct hci_conn_hash *h = &hdev->conn_hash;
1501 struct list_head *p;
1502 struct hci_conn *c;
1503
1504 BT_ERR("%s ACL tx timeout", hdev->name);
1505
1506 /* Kill stalled connections */
1507 list_for_each(p, &h->list) {
1508 c = list_entry(p, struct hci_conn, list);
1509 if (c->type == ACL_LINK && c->sent) {
1510 BT_ERR("%s killing stalled ACL connection %s",
1511 hdev->name, batostr(&c->dst));
1512 hci_acl_disconn(c, 0x13);
1513 }
1514 }
1515}
1516
/* Schedule ACL: send queued ACL data while controller buffer credits
 * (acl_cnt) last, round-robin across connections via hci_low_sent(). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed, one more packet in flight */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1546
1547/* Schedule SCO */
1548static inline void hci_sched_sco(struct hci_dev *hdev)
1549{
1550 struct hci_conn *conn;
1551 struct sk_buff *skb;
1552 int quote;
1553
1554 BT_DBG("%s", hdev->name);
1555
1556 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1557 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1558 BT_DBG("skb %p len %d", skb, skb->len);
1559 hci_send_frame(skb);
1560
1561 conn->sent++;
1562 if (conn->sent == ~0)
1563 conn->sent = 0;
1564 }
1565 }
1566}
1567
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001568static inline void hci_sched_esco(struct hci_dev *hdev)
1569{
1570 struct hci_conn *conn;
1571 struct sk_buff *skb;
1572 int quote;
1573
1574 BT_DBG("%s", hdev->name);
1575
1576 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1577 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1578 BT_DBG("skb %p len %d", skb, skb->len);
1579 hci_send_frame(skb);
1580
1581 conn->sent++;
1582 if (conn->sent == ~0)
1583 conn->sent = 0;
1584 }
1585 }
1586}
1587
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588static void hci_tx_task(unsigned long arg)
1589{
1590 struct hci_dev *hdev = (struct hci_dev *) arg;
1591 struct sk_buff *skb;
1592
1593 read_lock(&hci_task_lock);
1594
1595 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1596
1597 /* Schedule queues and send stuff to HCI driver */
1598
1599 hci_sched_acl(hdev);
1600
1601 hci_sched_sco(hdev);
1602
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001603 hci_sched_esco(hdev);
1604
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 /* Send next queued raw (unknown type) packet */
1606 while ((skb = skb_dequeue(&hdev->raw_q)))
1607 hci_send_frame(skb);
1608
1609 read_unlock(&hci_task_lock);
1610}
1611
1612/* ----- HCI RX task (incoming data proccessing) ----- */
1613
1614/* ACL data packet */
1615static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1616{
1617 struct hci_acl_hdr *hdr = (void *) skb->data;
1618 struct hci_conn *conn;
1619 __u16 handle, flags;
1620
1621 skb_pull(skb, HCI_ACL_HDR_SIZE);
1622
1623 handle = __le16_to_cpu(hdr->handle);
1624 flags = hci_flags(handle);
1625 handle = hci_handle(handle);
1626
1627 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1628
1629 hdev->stat.acl_rx++;
1630
1631 hci_dev_lock(hdev);
1632 conn = hci_conn_hash_lookup_handle(hdev, handle);
1633 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001634
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 if (conn) {
1636 register struct hci_proto *hp;
1637
Marcel Holtmann04837f62006-07-03 10:02:33 +02001638 hci_conn_enter_active_mode(conn);
1639
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 /* Send to upper protocol */
1641 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1642 hp->recv_acldata(conn, skb, flags);
1643 return;
1644 }
1645 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001646 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647 hdev->name, handle);
1648 }
1649
1650 kfree_skb(skb);
1651}
1652
1653/* SCO data packet */
1654static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1655{
1656 struct hci_sco_hdr *hdr = (void *) skb->data;
1657 struct hci_conn *conn;
1658 __u16 handle;
1659
1660 skb_pull(skb, HCI_SCO_HDR_SIZE);
1661
1662 handle = __le16_to_cpu(hdr->handle);
1663
1664 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1665
1666 hdev->stat.sco_rx++;
1667
1668 hci_dev_lock(hdev);
1669 conn = hci_conn_hash_lookup_handle(hdev, handle);
1670 hci_dev_unlock(hdev);
1671
1672 if (conn) {
1673 register struct hci_proto *hp;
1674
1675 /* Send to upper protocol */
1676 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1677 hp->recv_scodata(conn, skb);
1678 return;
1679 }
1680 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001681 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 hdev->name, handle);
1683 }
1684
1685 kfree_skb(skb);
1686}
1687
/* RX tasklet body: drain rx_q, mirroring frames to promiscuous sockets
 * and dispatching by packet type. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process frames at all */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1742
/* CMD tasklet body: send the next queued command when a controller
 * command credit (cmd_cnt) is available. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* If no credit came back within a second of the last command,
	 * assume it timed out and restore one credit to make progress. */
	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		/* Drop the previously kept command clone (kfree_skb(NULL) is
		 * a no-op) before keeping a clone of the new one; the clone
		 * is read back later via hci_sent_cmd_data(). */
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue and retry on the next run */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}