/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

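/*
 * Synchronous request wrapper. The req() callback queues one or more
 * HCI commands; the event path reports completion via hci_req_complete(),
 * which wakes the sleeper in __hci_request(). A typical caller looks
 * like this (sketch, timeout in jiffies):
 *
 *	err = hci_request(hdev, hci_auth_req, dr.dev_opt,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */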
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

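/*
 * Controller bring-up sequence: flush any driver-queued vendor commands,
 * reset the controller (unless HCI_QUIRK_NO_RESET is set), read the
 * mandatory parameters (features, version, buffer sizes, BD address),
 * then apply the optional defaults (event filter, page/accept timeouts).
 */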
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;
		skb_queue_tail(&hdev->cmd_q, skb);
		hci_sched_cmd(hdev);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

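/*
 * Reference counting sketch: every successful hci_dev_get() must be
 * balanced by hci_dev_put() once the caller is done with the device,
 * as done throughout this file:
 *
 *	if (!(hdev = hci_dev_get(dev_id)))
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */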
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
		/* Entry not in the cache. Add new one. */
		if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
			return;
		e->next = cache->list;
		cache->list = e;
	}

	memcpy(&e->data, data, sizeof(*data));
	e->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

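/*
 * HCIINQUIRY ioctl flow: flush the inquiry cache when it is stale (or
 * when the caller asked for IREQ_CACHE_FLUSH), run the inquiry request
 * synchronously, then dump the cached responses back to user space.
 */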
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);
	if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
		goto done;

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

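/*
 * Teardown order matters here: kill the RX/TX tasklets first so nothing
 * requeues work, flush connections and the inquiry cache, reset the
 * controller, then drop all queues before calling the driver's close().
 */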
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(dr.dev_id)))
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	if (!(dl = kzalloc(size, GFP_KERNEL)))
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(di.dev_id)))
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = hdev->type;
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s type %d owner %p", hdev, hdev->name, hdev->type, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < 3; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	init_MUTEX(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hci_register_sysfs(hdev);

	hci_notify(hdev, HCI_DEV_REG);

	return id;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < 3; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	hci_unregister_sysfs(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

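/*
 * Reassembly sketch: packet types HCI_ACLDATA_PKT (0x02), HCI_SCODATA_PKT
 * (0x03) and HCI_EVENT_PKT (0x04) map onto hdev->reassembly[0..2], hence
 * the "(type) - 2" below. A byte-stream transport driver (e.g. a UART
 * driver) would feed received data in arbitrary chunks:
 *
 *	hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
 */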
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		struct sk_buff *skb = __reassembly(hdev, type);
		struct { int expect; } *scb;
		int len = 0;

		if (!skb) {
			/* Start of the frame */

			switch (type) {
			case HCI_EVENT_PKT:
				if (count >= HCI_EVENT_HDR_SIZE) {
					struct hci_event_hdr *h = data;
					len = HCI_EVENT_HDR_SIZE + h->plen;
				} else
					return -EILSEQ;
				break;

			case HCI_ACLDATA_PKT:
				if (count >= HCI_ACL_HDR_SIZE) {
					struct hci_acl_hdr *h = data;
					len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
				} else
					return -EILSEQ;
				break;

			case HCI_SCODATA_PKT:
				if (count >= HCI_SCO_HDR_SIZE) {
					struct hci_sco_hdr *h = data;
					len = HCI_SCO_HDR_SIZE + h->dlen;
				} else
					return -EILSEQ;
				break;
			}

			skb = bt_skb_alloc(len, GFP_ATOMIC);
			if (!skb) {
				BT_ERR("%s no memory for packet", hdev->name);
				return -ENOMEM;
			}

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = type;

			__reassembly(hdev, type) = skb;

			scb = (void *) skb->cb;
			scb->expect = len;
		} else {
			/* Continuation */

			scb = (void *) skb->cb;
			len = scb->expect;
		}

		len = min(len, count);

		memcpy(skb_put(skb, len), data, len);

		scb->expect -= len;

		if (scb->expect == 0) {
			/* Complete frame */

			__reassembly(hdev, type) = NULL;

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);
		}

		count -= len; data += len;
	}

	return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

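/*
 * hci_send_cmd() builds the command header (little-endian opcode plus
 * parameter length), appends the parameters and queues the skb for the
 * cmd task. Typical call, as used throughout this file:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */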
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;
	skb_queue_tail(&hdev->cmd_q, skb);
	hci_sched_cmd(hdev);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

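/*
 * ACL fragmentation: a frame larger than the controller's ACL MTU
 * arrives here with its fragments chained on skb_shinfo(skb)->frag_list.
 * The first fragment is tagged ACL_START, every continuation ACL_CONT,
 * and all fragments are queued atomically so the TX task never sees a
 * partially queued frame.
 */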
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	if (!(list = skb_shinfo(skb)->frag_list)) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
int hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	if (skb->len > hdev->sco_mtu) {
		kfree_skb(skb);
		return -EINVAL;
	}

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
	skb_queue_tail(&conn->data_q, skb);
	hci_sched_tx(hdev);
	return 0;
}
EXPORT_SYMBOL(hci_send_sco);

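/*
 * Scheduler fairness: hci_low_sent() picks the connection of the given
 * type with the fewest in-flight packets and grants it a quota of
 * cnt / num (at least 1), where cnt is the free controller buffer count
 * and num the number of connections with queued data. Repeated rounds
 * approximate round-robin weighted toward the least busy connections.
 */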
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

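/*
 * RX path overview: each received frame is optionally copied to
 * promiscuous sockets, dropped entirely in raw mode, and data packets
 * are discarded while HCI_INIT is in progress; everything else is
 * dispatched by packet type to the event handler or to the
 * upper-protocol receive callbacks below.
 */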
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			hci_sched_cmd(hdev);
		}
	}
}