/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
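
/*
 * Illustrative sketch (not part of the original file): a module that
 * wants HCI device events can hook this chain. The my_* names below
 * are hypothetical.
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_REG)
 *			BT_DBG("registered %s", hdev->name);
 *
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_hci_event,
 *	};
 *
 *	hci_register_notifier(&my_nb);
 */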

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
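
/*
 * Sketch of the request pattern (assumption; it mirrors hci_scan_req
 * below): a request callback only queues HCI commands, and completion
 * is signalled from the event path via hci_req_complete().
 *
 *	static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, my_scan_req, SCAN_PAGE,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */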

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so we allocate a temporary buffer and
	 * then copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
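
/*
 * Userspace view (illustrative sketch, not from this file): HCIINQUIRY
 * takes a struct hci_inquiry_req immediately followed by room for the
 * inquiry_info results; the GIAC LAP 0x9e8b33 is stored LSB first.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */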

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->bus;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
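
/*
 * Typical driver flow (illustrative sketch; the my_* callbacks are
 * hypothetical, not defined in this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */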

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
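
/*
 * Sketch (assumption, not from this file): a driver that receives
 * unframed byte streams, e.g. over a UART, can push each chunk here
 * per packet type and let the core reassemble complete frames:
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * Note the index math in __reassembly(): packet types 2..4 (ACL, SCO,
 * event) map onto reassembly[0..2].
 */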

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
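
/*
 * Sketch (illustrative; mirrors how L2CAP and SCO hook in): an upper
 * protocol claims a slot in the hci_proto table and then gets its
 * recv_acldata/recv_scodata callbacks invoked from the RX task below.
 * The my_* names are hypothetical.
 *
 *	static struct hci_proto my_proto = {
 *		.name         = "MYPROTO",
 *		.id           = HCI_PROTO_L2CAP,
 *		.recv_acldata = my_recv_acldata,
 *	};
 *
 *	hci_register_proto(&my_proto);
 */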

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}