blob: b7bbe029a28f10496fb823791cc97544cefa8b41 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an auto-powered-on adapter that nobody uses is
 * powered back off. */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet bodies for the three HCI data paths (defined later in this file) */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Serializes the RX/TX/command tasklets against protocol (un)registration */
static DEFINE_RWLOCK(hci_task_lock);

/* Module toggle: Security Manager Protocol support (LE pairing) */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols (L2CAP and SCO) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* Notifier chain for HCI device events (may be called from atomic context) */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Brian Gix6e4531c2011-10-28 16:12:08 -0700196 memset(&hdev->features, 0, sizeof(hdev->features));
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198}
199
200static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
201{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200202 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800204 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200205 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100217 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 }
219 skb_queue_purge(&hdev->driver_init);
220
221 /* Mandatory initialization */
222
223 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300224 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
225 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 /* Set default HCI Flow Control Mode */
234 if (hdev->dev_type == HCI_BREDR)
235 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
236 else
237 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
238
239 /* Read HCI Flow Control Mode */
240 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
241
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
246 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248#if 0
249 /* Host buffer size */
250 {
251 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700254 cp.acl_max_pkt = cpu_to_le16(0xffff);
255 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200256 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258#endif
259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 if (hdev->dev_type == HCI_BREDR) {
261 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 /* Read Local Supported Features */
264 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 /* Read BD Address */
267 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 /* Read Class of Device */
270 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 /* Read Local Name */
273 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275 /* Read Voice Setting */
276 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 /* Optional initialization */
279 /* Clear Event Filters */
280 flt_type = HCI_FLT_CLEAR_ALL;
281 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 /* Connection accept timeout ~20 secs */
284 param = cpu_to_le16(0x7d00);
285 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
286
287 bacpy(&cp.bdaddr, BDADDR_ANY);
288 cp.delete_all = 1;
289 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
290 sizeof(cp), &cp);
291 } else {
292 /* AMP initialization */
293 /* Connection accept timeout ~5 secs */
294 param = cpu_to_le16(0x1f40);
295 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
296
297 /* Read AMP Info */
298 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300}
301
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300302static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
303{
304 BT_DBG("%s", hdev->name);
305
306 /* Read LE buffer size */
307 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
308}
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __u8 scan = opt;
313
314 BT_DBG("%s %x", hdev->name, scan);
315
316 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200317 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318}
319
320static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
321{
322 __u8 auth = opt;
323
324 BT_DBG("%s %x", hdev->name, auth);
325
326 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200327 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328}
329
330static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
331{
332 __u8 encrypt = opt;
333
334 BT_DBG("%s %x", hdev->name, encrypt);
335
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200336 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200337 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338}
339
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200340static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
341{
342 __le16 policy = cpu_to_le16(opt);
343
Marcel Holtmanna418b892008-11-30 12:17:28 +0100344 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200345
346 /* Default link policy */
347 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
348}
349
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900350/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 * Device is held on return. */
352struct hci_dev *hci_dev_get(int index)
353{
354 struct hci_dev *hdev = NULL;
355 struct list_head *p;
356
357 BT_DBG("%d", index);
358
359 if (index < 0)
360 return NULL;
361
362 read_lock(&hci_dev_list_lock);
363 list_for_each(p, &hci_dev_list) {
364 struct hci_dev *d = list_entry(p, struct hci_dev, list);
365 if (d->id == index) {
366 hdev = hci_dev_hold(d);
367 break;
368 }
369 }
370 read_unlock(&hci_dev_list_lock);
371 return hdev;
372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375/* ---- Inquiry support ---- */
376static void inquiry_cache_flush(struct hci_dev *hdev)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *next = cache->list, *e;
380
381 BT_DBG("cache %p", cache);
382
383 cache->list = NULL;
384 while ((e = next)) {
385 next = e->next;
386 kfree(e);
387 }
388}
389
390struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
391{
392 struct inquiry_cache *cache = &hdev->inq_cache;
393 struct inquiry_entry *e;
394
395 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
396
397 for (e = cache->list; e; e = e->next)
398 if (!bacmp(&e->data.bdaddr, bdaddr))
399 break;
400 return e;
401}
402
403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
404{
405 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200406 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
408 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
409
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200410 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
411 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200413 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
414 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416
417 ie->next = cache->list;
418 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 }
420
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200421 memcpy(&ie->data, data, sizeof(*data));
422 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 cache->timestamp = jiffies;
424}
425
426static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
427{
428 struct inquiry_cache *cache = &hdev->inq_cache;
429 struct inquiry_info *info = (struct inquiry_info *) buf;
430 struct inquiry_entry *e;
431 int copied = 0;
432
433 for (e = cache->list; e && copied < num; e = e->next, copied++) {
434 struct inquiry_data *data = &e->data;
435 bacpy(&info->bdaddr, &data->bdaddr);
436 info->pscan_rep_mode = data->pscan_rep_mode;
437 info->pscan_period_mode = data->pscan_period_mode;
438 info->pscan_mode = data->pscan_mode;
439 memcpy(info->dev_class, data->dev_class, 3);
440 info->clock_offset = data->clock_offset;
441 info++;
442 }
443
444 BT_DBG("cache %p, copied %d", cache, copied);
445 return copied;
446}
447
448static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
449{
450 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
451 struct hci_cp_inquiry cp;
452
453 BT_DBG("%s", hdev->name);
454
455 if (test_bit(HCI_INQUIRY, &hdev->flags))
456 return;
457
458 /* Start Inquiry */
459 memcpy(&cp.lap, &ir->lap, 3);
460 cp.length = ir->length;
461 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200462 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463}
464
465int hci_inquiry(void __user *arg)
466{
467 __u8 __user *ptr = arg;
468 struct hci_inquiry_req ir;
469 struct hci_dev *hdev;
470 int err = 0, do_inquiry = 0, max_rsp;
471 long timeo;
472 __u8 *buf;
473
474 if (copy_from_user(&ir, ptr, sizeof(ir)))
475 return -EFAULT;
476
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200477 hdev = hci_dev_get(ir.dev_id);
478 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 return -ENODEV;
480
481 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900482 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200483 inquiry_cache_empty(hdev) ||
484 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 inquiry_cache_flush(hdev);
486 do_inquiry = 1;
487 }
488 hci_dev_unlock_bh(hdev);
489
Marcel Holtmann04837f62006-07-03 10:02:33 +0200490 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200491
492 if (do_inquiry) {
493 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
494 if (err < 0)
495 goto done;
496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 /* for unlimited number of responses we will use buffer with 255 entries */
499 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
500
501 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
502 * copy it to the user space.
503 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100504 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200505 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 err = -ENOMEM;
507 goto done;
508 }
509
510 hci_dev_lock_bh(hdev);
511 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
512 hci_dev_unlock_bh(hdev);
513
514 BT_DBG("num_rsp %d", ir.num_rsp);
515
516 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
517 ptr += sizeof(ir);
518 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
519 ir.num_rsp))
520 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900521 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 err = -EFAULT;
523
524 kfree(buf);
525
526done:
527 hci_dev_put(hdev);
528 return err;
529}
530
531/* ---- HCI ioctl helpers ---- */
532
533int hci_dev_open(__u16 dev)
534{
535 struct hci_dev *hdev;
536 int ret = 0;
537
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200538 hdev = hci_dev_get(dev);
539 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540 return -ENODEV;
541
542 BT_DBG("%s %p", hdev->name, hdev);
543
544 hci_req_lock(hdev);
545
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200546 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
547 ret = -ERFKILL;
548 goto done;
549 }
550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 if (test_bit(HCI_UP, &hdev->flags)) {
552 ret = -EALREADY;
553 goto done;
554 }
555
556 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
557 set_bit(HCI_RAW, &hdev->flags);
558
559 if (hdev->open(hdev)) {
560 ret = -EIO;
561 goto done;
562 }
563
564 if (!test_bit(HCI_RAW, &hdev->flags)) {
565 atomic_set(&hdev->cmd_cnt, 1);
566 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200567 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568
Marcel Holtmann04837f62006-07-03 10:02:33 +0200569 ret = __hci_request(hdev, hci_init_req, 0,
570 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300573 ret = __hci_request(hdev, hci_le_init_req, 0,
574 msecs_to_jiffies(HCI_INIT_TIMEOUT));
575
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 clear_bit(HCI_INIT, &hdev->flags);
577 }
578
579 if (!ret) {
580 hci_dev_hold(hdev);
581 set_bit(HCI_UP, &hdev->flags);
582 hci_notify(hdev, HCI_DEV_UP);
Peter Krystad1fc44072011-08-30 15:38:12 -0700583 if (!test_bit(HCI_SETUP, &hdev->flags) &&
584 hdev->dev_type == HCI_BREDR)
Johan Hedberg5add6af2010-12-16 10:00:37 +0200585 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900586 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 /* Init failed, cleanup */
588 tasklet_kill(&hdev->rx_task);
589 tasklet_kill(&hdev->tx_task);
590 tasklet_kill(&hdev->cmd_task);
591
592 skb_queue_purge(&hdev->cmd_q);
593 skb_queue_purge(&hdev->rx_q);
594
595 if (hdev->flush)
596 hdev->flush(hdev);
597
598 if (hdev->sent_cmd) {
599 kfree_skb(hdev->sent_cmd);
600 hdev->sent_cmd = NULL;
601 }
602
603 hdev->close(hdev);
604 hdev->flags = 0;
605 }
606
607done:
608 hci_req_unlock(hdev);
609 hci_dev_put(hdev);
610 return ret;
611}
612
613static int hci_dev_do_close(struct hci_dev *hdev)
614{
615 BT_DBG("%s %p", hdev->name, hdev);
616
617 hci_req_cancel(hdev, ENODEV);
618 hci_req_lock(hdev);
619
620 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300621 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622 hci_req_unlock(hdev);
623 return 0;
624 }
625
626 /* Kill RX and TX tasks */
627 tasklet_kill(&hdev->rx_task);
628 tasklet_kill(&hdev->tx_task);
629
630 hci_dev_lock_bh(hdev);
631 inquiry_cache_flush(hdev);
632 hci_conn_hash_flush(hdev);
633 hci_dev_unlock_bh(hdev);
634
635 hci_notify(hdev, HCI_DEV_DOWN);
636
637 if (hdev->flush)
638 hdev->flush(hdev);
639
640 /* Reset device */
641 skb_queue_purge(&hdev->cmd_q);
642 atomic_set(&hdev->cmd_cnt, 1);
643 if (!test_bit(HCI_RAW, &hdev->flags)) {
644 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200645 __hci_request(hdev, hci_reset_req, 0,
646 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 clear_bit(HCI_INIT, &hdev->flags);
648 }
649
650 /* Kill cmd task */
651 tasklet_kill(&hdev->cmd_task);
652
653 /* Drop queues */
654 skb_queue_purge(&hdev->rx_q);
655 skb_queue_purge(&hdev->cmd_q);
656 skb_queue_purge(&hdev->raw_q);
657
658 /* Drop last sent command */
659 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300660 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661 kfree_skb(hdev->sent_cmd);
662 hdev->sent_cmd = NULL;
663 }
664
665 /* After this point our queues are empty
666 * and no tasks are scheduled. */
667 hdev->close(hdev);
668
Peter Krystad1fc44072011-08-30 15:38:12 -0700669 if (hdev->dev_type == HCI_BREDR)
670 mgmt_powered(hdev->id, 0);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200671
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672 /* Clear flags */
673 hdev->flags = 0;
674
675 hci_req_unlock(hdev);
676
677 hci_dev_put(hdev);
678 return 0;
679}
680
681int hci_dev_close(__u16 dev)
682{
683 struct hci_dev *hdev;
684 int err;
685
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200686 hdev = hci_dev_get(dev);
687 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 return -ENODEV;
689 err = hci_dev_do_close(hdev);
690 hci_dev_put(hdev);
691 return err;
692}
693
694int hci_dev_reset(__u16 dev)
695{
696 struct hci_dev *hdev;
697 int ret = 0;
698
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200699 hdev = hci_dev_get(dev);
700 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701 return -ENODEV;
702
703 hci_req_lock(hdev);
704 tasklet_disable(&hdev->tx_task);
705
706 if (!test_bit(HCI_UP, &hdev->flags))
707 goto done;
708
709 /* Drop queues */
710 skb_queue_purge(&hdev->rx_q);
711 skb_queue_purge(&hdev->cmd_q);
712
713 hci_dev_lock_bh(hdev);
714 inquiry_cache_flush(hdev);
715 hci_conn_hash_flush(hdev);
716 hci_dev_unlock_bh(hdev);
717
718 if (hdev->flush)
719 hdev->flush(hdev);
720
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900721 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300722 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700723
724 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200725 ret = __hci_request(hdev, hci_reset_req, 0,
726 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727
728done:
729 tasklet_enable(&hdev->tx_task);
730 hci_req_unlock(hdev);
731 hci_dev_put(hdev);
732 return ret;
733}
734
735int hci_dev_reset_stat(__u16 dev)
736{
737 struct hci_dev *hdev;
738 int ret = 0;
739
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200740 hdev = hci_dev_get(dev);
741 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700742 return -ENODEV;
743
744 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
745
746 hci_dev_put(hdev);
747
748 return ret;
749}
750
751int hci_dev_cmd(unsigned int cmd, void __user *arg)
752{
753 struct hci_dev *hdev;
754 struct hci_dev_req dr;
755 int err = 0;
756
757 if (copy_from_user(&dr, arg, sizeof(dr)))
758 return -EFAULT;
759
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200760 hdev = hci_dev_get(dr.dev_id);
761 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 return -ENODEV;
763
764 switch (cmd) {
765 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200766 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
767 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 break;
769
770 case HCISETENCRYPT:
771 if (!lmp_encrypt_capable(hdev)) {
772 err = -EOPNOTSUPP;
773 break;
774 }
775
776 if (!test_bit(HCI_AUTH, &hdev->flags)) {
777 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200778 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
779 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 if (err)
781 break;
782 }
783
Marcel Holtmann04837f62006-07-03 10:02:33 +0200784 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
785 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 break;
787
788 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200789 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
790 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791 break;
792
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200793 case HCISETLINKPOL:
794 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
795 msecs_to_jiffies(HCI_INIT_TIMEOUT));
796 break;
797
798 case HCISETLINKMODE:
799 hdev->link_mode = ((__u16) dr.dev_opt) &
800 (HCI_LM_MASTER | HCI_LM_ACCEPT);
801 break;
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803 case HCISETPTYPE:
804 hdev->pkt_type = (__u16) dr.dev_opt;
805 break;
806
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200808 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
809 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700810 break;
811
812 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200813 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
814 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700815 break;
816
817 default:
818 err = -EINVAL;
819 break;
820 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200821
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 hci_dev_put(hdev);
823 return err;
824}
825
826int hci_get_dev_list(void __user *arg)
827{
828 struct hci_dev_list_req *dl;
829 struct hci_dev_req *dr;
830 struct list_head *p;
831 int n = 0, size, err;
832 __u16 dev_num;
833
834 if (get_user(dev_num, (__u16 __user *) arg))
835 return -EFAULT;
836
837 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
838 return -EINVAL;
839
840 size = sizeof(*dl) + dev_num * sizeof(*dr);
841
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200842 dl = kzalloc(size, GFP_KERNEL);
843 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 return -ENOMEM;
845
846 dr = dl->dev_req;
847
848 read_lock_bh(&hci_dev_list_lock);
849 list_for_each(p, &hci_dev_list) {
850 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200851
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200853
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200854 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200855
856 if (!test_bit(HCI_MGMT, &hdev->flags))
857 set_bit(HCI_PAIRABLE, &hdev->flags);
858
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 (dr + n)->dev_id = hdev->id;
860 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200861
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 if (++n >= dev_num)
863 break;
864 }
865 read_unlock_bh(&hci_dev_list_lock);
866
867 dl->dev_num = n;
868 size = sizeof(*dl) + n * sizeof(*dr);
869
870 err = copy_to_user(arg, dl, size);
871 kfree(dl);
872
873 return err ? -EFAULT : 0;
874}
875
/* HCIGETDEVINFO ioctl backend: fill a hci_dev_info snapshot for one
 * adapter identified by dev_id and copy it to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference on success; released via hci_dev_put() below. */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is looking at the device: cancel pending auto-off. */
	hci_del_off_timer(hdev);

	/* Without the management interface fall back to being pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (high nibble). */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
916
917/* ---- Interface to HCI drivers ---- */
918
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200919static int hci_rfkill_set_block(void *data, bool blocked)
920{
921 struct hci_dev *hdev = data;
922
923 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
924
925 if (!blocked)
926 return 0;
927
928 hci_dev_do_close(hdev);
929
930 return 0;
931}
932
/* rfkill integration: only the block/unblock transition is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
936
Linus Torvalds1da177e2005-04-16 15:20:36 -0700937/* Alloc HCI device */
938struct hci_dev *hci_alloc_dev(void)
939{
940 struct hci_dev *hdev;
941
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200942 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 if (!hdev)
944 return NULL;
945
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 skb_queue_head_init(&hdev->driver_init);
947
948 return hdev;
949}
950EXPORT_SYMBOL(hci_alloc_dev);
951
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
961
/* Deferred power-on worker: opens the adapter and arms the auto-off
 * timer / announces the index for BR/EDR controllers. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* -EALREADY just means someone beat us to opening the device. */
	err = hci_dev_open(hdev->id);
	if (err && err != -EALREADY)
		return;

	/* Auto-opened BR/EDR adapters are shut down again after
	 * AUTO_OFF_TIMEOUT unless something claims them first. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful setup of a BR/EDR adapter: tell mgmt. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
982
/* Deferred power-off worker: counterpart of hci_power_on(). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
991
/* off_timer expiry (timer context): clear the auto-off flag and defer
 * the actual close to the workqueue, since hci_dev_close() may sleep. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
1002
/* Cancel a pending auto power-off (called when userspace shows interest
 * in the adapter, so it should stay up). */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1010
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001011int hci_uuids_clear(struct hci_dev *hdev)
1012{
1013 struct list_head *p, *n;
1014
1015 list_for_each_safe(p, n, &hdev->uuids) {
1016 struct bt_uuid *uuid;
1017
1018 uuid = list_entry(p, struct bt_uuid, list);
1019
1020 list_del(p);
1021 kfree(uuid);
1022 }
1023
1024 return 0;
1025}
1026
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001027int hci_link_keys_clear(struct hci_dev *hdev)
1028{
1029 struct list_head *p, *n;
1030
1031 list_for_each_safe(p, n, &hdev->link_keys) {
1032 struct link_key *key;
1033
1034 key = list_entry(p, struct link_key, list);
1035
1036 list_del(p);
1037 kfree(key);
1038 }
1039
1040 return 0;
1041}
1042
1043struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1044{
1045 struct list_head *p;
1046
1047 list_for_each(p, &hdev->link_keys) {
1048 struct link_key *k;
1049
1050 k = list_entry(p, struct link_key, list);
1051
1052 if (bacmp(bdaddr, &k->bdaddr) == 0)
1053 return k;
1054 }
1055
1056 return NULL;
1057}
1058
/* Look up an SMP Long Term Key by its EDIV/Rand pair (the values the
 * master sends in the LE encryption request); NULL when absent. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;
		struct key_master_id *id;

		k = list_entry(p, struct link_key, list);

		/* LTKs share the link_keys list with classic keys; skip
		 * everything that is not an LTK. */
		if (k->key_type != KEY_TYPE_LTK)
			continue;

		/* Only entries carrying a full master-id payload qualify. */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1084
1085struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1086 bdaddr_t *bdaddr, u8 type)
1087{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001088 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001089
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 list_for_each(p, &hdev->link_keys) {
1091 struct link_key *k;
1092
1093 k = list_entry(p, struct link_key, list);
1094
Brian Gixcf956772011-10-20 15:18:51 -07001095 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001096 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001098
1099 return NULL;
1100}
1101EXPORT_SYMBOL(hci_find_link_key_type);
1102
/* Store (or update) a classic link key for bdaddr and, for new keys,
 * notify the management interface whether the key represents a bond.
 * Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->key_type;
		key = old_key;
	} else {
		/* 0xff is a sentinel meaning "no previous key stored". */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->key_type = type;
	key->pin_len = pin_len;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	/* Store the link key persistently if one of the following is true:
	 * 1. the remote side is using dedicated bonding since in that case
	 *    also the local requirements are set to dedicated bonding
	 * 2. the local side had dedicated bonding as a requirement
	 * 3. this is a legacy link key
	 * 4. this is a changed combination key and there was a previously
	 *    stored one
	 * If none of the above match only keep the link key around for
	 * this connection and set the temporary flag for the device.
	 */

	if (conn) {
		if ((conn->remote_auth > 0x01) ||
			(conn->auth_initiator && conn->auth_type > 0x01) ||
			(key->key_type < 0x03) ||
			(key->key_type == 0x06 && old_key_type != 0xff))
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* A changed combination key (0x06) keeps the previous stored type
	 * so the entry's classification is not upgraded by the change. */
	if (type == 0x06)
		key->key_type = old_key_type;

	return 0;
}
1159
/* Store (or update) an SMP Long Term Key, keyed by bdaddr, and notify
 * mgmt for new keys. The EDIV/Rand master id is kept in the entry's
 * variable-length data area. Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 addr_type, u8 key_size, u8 auth,
			__le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
						batostr(bdaddr), addr_type);

	/* Reuse an existing LTK entry for this address if present. */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		/* Allocate room for the key plus the trailing master id. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	key->addr_type = addr_type;
	memcpy(key->val, ltk, sizeof(key->val));
	key->key_type = KEY_TYPE_LTK;
	/* The negotiated encryption key size is stored in pin_len. */
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* Bit 0 of auth marks a bonded (persistent) key for mgmt. */
	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1198
/* Delete the stored link key for bdaddr. Returns 0 or -ENOENT. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1214
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered: release the command credit and
	 * any in-flight reset state, then kick the cmd task so queued
	 * commands are not stuck forever. */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1225
/* Look up stored out-of-band pairing data by remote address;
 * NULL when absent. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1237
/* Delete stored OOB data for bdaddr. Returns 0 or -ENOENT. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1253
/* Drop all stored remote OOB data entries. Always succeeds. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1265
/* adv_timer expiry: flush the cached LE advertising entries. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_adv_entries_clear(hdev);
}
1272
1273int hci_adv_entries_clear(struct hci_dev *hdev)
1274{
1275 struct list_head *p, *n;
1276
Brian Gixa68668b2011-08-11 15:49:36 -07001277 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001278 write_lock_bh(&hdev->adv_entries_lock);
1279
1280 list_for_each_safe(p, n, &hdev->adv_entries) {
1281 struct adv_entry *entry;
1282
1283 entry = list_entry(p, struct adv_entry, list);
1284
1285 list_del(p);
1286 kfree(entry);
1287 }
1288
1289 write_unlock_bh(&hdev->adv_entries_lock);
1290
1291 return 0;
1292}
1293
1294struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1295{
1296 struct list_head *p;
1297 struct adv_entry *res = NULL;
1298
Brian Gixa68668b2011-08-11 15:49:36 -07001299 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300 read_lock_bh(&hdev->adv_entries_lock);
1301
1302 list_for_each(p, &hdev->adv_entries) {
1303 struct adv_entry *entry;
1304
1305 entry = list_entry(p, struct adv_entry, list);
1306
1307 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1308 res = entry;
1309 goto out;
1310 }
1311 }
1312out:
1313 read_unlock_bh(&hdev->adv_entries_lock);
1314 return res;
1315}
1316
1317static inline int is_connectable_adv(u8 evt_type)
1318{
1319 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1320 return 1;
1321
1322 return 0;
1323}
1324
/* Store (or refresh) the Secure Simple Pairing OOB hash/randomizer
 * received for bdaddr. Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1348
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001349int hci_add_adv_entry(struct hci_dev *hdev,
1350 struct hci_ev_le_advertising_info *ev)
1351{
1352 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001353 u8 flags = 0;
1354 int i;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001355
Brian Gixa68668b2011-08-11 15:49:36 -07001356 BT_DBG("");
1357
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001358 if (!is_connectable_adv(ev->evt_type))
1359 return -EINVAL;
1360
Brian Gixfdd38922011-09-28 16:23:48 -07001361 if (ev->data && ev->length) {
1362 for (i = 0; (i + 2) < ev->length; i++)
1363 if (ev->data[i+1] == 0x01) {
1364 flags = ev->data[i+2];
1365 BT_DBG("flags: %2.2x", flags);
1366 break;
1367 } else {
1368 i += ev->data[i];
1369 }
1370 }
1371
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001372 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001373 /* Only new entries should be added to adv_entries. So, if
1374 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001375 if (entry) {
1376 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001377 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001378 }
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001379
1380 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1381 if (!entry)
1382 return -ENOMEM;
1383
1384 bacpy(&entry->bdaddr, &ev->bdaddr);
1385 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001386 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001387
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001388 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001389 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001390 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001391
1392 return 0;
1393}
1394
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001395static struct crypto_blkcipher *alloc_cypher(void)
1396{
1397 if (enable_smp)
1398 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1399
1400 return ERR_PTR(-ENOTSUPP);
1401}
1402
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the mandatory callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* BR/EDR controllers get ids starting at 0, others start at 1. */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Default state: basic packet types, accept-all link mode,
	 * NoInputNoOutput IO capability. */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP crypto is optional; registration proceeds without it. */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill failure is non-fatal: the adapter just is not killable. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the adapter up asynchronously; it auto-powers off again
	 * unless claimed (see hci_power_on). */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above under the same lock. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1522
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Take the device off the global list first so no new users find it. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal of fully set-up BR/EDR adapters to mgmt. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mgmt_index_removed(hdev->id);

	/* tfm may hold ERR_PTR if the transform never loaded. */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop the timers before tearing down the state they touch. */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the registration reference; frees once refcnt hits zero. */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1574
/* Suspend HCI device: just broadcast the event to interested parties. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1582
/* Resume HCI device: just broadcast the event to interested parties. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1590
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Frames are only accepted while the device is up or initialising;
	 * otherwise the skb is consumed and dropped. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1614
/* Incrementally reassemble one HCI packet of the given type from a
 * driver byte stream, using hdev->reassembly[index] as the per-slot
 * partial-packet buffer.
 *
 * Returns the number of input bytes NOT consumed (0 when everything
 * was used), or a negative errno. A completed packet is handed to
 * hci_recv_frame() and the slot is reset. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial packet yet: allocate a buffer sized for the
		 * largest possible packet of this type and expect to read
		 * its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * stage (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header is in, learn the payload length from
		 * it and sanity-check it against the buffer tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1723
Marcel Holtmannef222012007-07-11 06:42:04 +02001724int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1725{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301726 int rem = 0;
1727
Marcel Holtmannef222012007-07-11 06:42:04 +02001728 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1729 return -EILSEQ;
1730
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001731 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001732 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301733 if (rem < 0)
1734 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001735
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301736 data += (count - rem);
1737 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001738 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001739
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301740 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001741}
1742EXPORT_SYMBOL(hci_recv_fragment);
1743
/* Slot index for byte-stream (typeless) reassembly. */
#define STREAM_REASSEMBLY 0

/* Driver entry point for an untyped byte stream (e.g. UART transports):
 * the first byte of each frame carries the packet type, the rest is fed
 * through hci_reassembly(). Returns leftover bytes or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Mid-frame: reuse the type recorded in the skb. */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1778
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779/* ---- Interface to upper protocols ---- */
1780
1781/* Register/Unregister protocols.
1782 * hci_task_lock is used to ensure that no tasks are running. */
1783int hci_register_proto(struct hci_proto *hp)
1784{
1785 int err = 0;
1786
1787 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1788
1789 if (hp->id >= HCI_MAX_PROTO)
1790 return -EINVAL;
1791
1792 write_lock_bh(&hci_task_lock);
1793
1794 if (!hci_proto[hp->id])
1795 hci_proto[hp->id] = hp;
1796 else
1797 err = -EEXIST;
1798
1799 write_unlock_bh(&hci_task_lock);
1800
1801 return err;
1802}
1803EXPORT_SYMBOL(hci_register_proto);
1804
1805int hci_unregister_proto(struct hci_proto *hp)
1806{
1807 int err = 0;
1808
1809 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1810
1811 if (hp->id >= HCI_MAX_PROTO)
1812 return -EINVAL;
1813
1814 write_lock_bh(&hci_task_lock);
1815
1816 if (hci_proto[hp->id])
1817 hci_proto[hp->id] = NULL;
1818 else
1819 err = -ENOENT;
1820
1821 write_unlock_bh(&hci_task_lock);
1822
1823 return err;
1824}
1825EXPORT_SYMBOL(hci_unregister_proto);
1826
/* Add @cb to the global hci_cb_list under hci_cb_list_lock.
 * Always succeeds and returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1838
/* Remove @cb from the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0; the caller must ensure @cb was registered. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1850
/* Add an AMP manager callback structure to amp_mgr_cb_list; its hooks
 * are invoked from the hci_amp_* dispatchers below.  Returns 0. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1862
/* Remove an AMP manager callback structure from amp_mgr_cb_list.
 * Always returns 0; the caller must ensure @cb was registered. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1874
/* Fan a Command Complete event out to every registered AMP manager
 * callback that implements amp_cmd_complete_event.  Runs under the
 * read side of amp_mgr_cb_list_lock; does not consume @skb. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1889
/* Fan a Command Status event (opcode + status code) out to every
 * registered AMP manager callback that implements
 * amp_cmd_status_event, under the read lock. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1903
/* Fan a raw HCI event (by event code) out to every registered AMP
 * manager callback that implements amp_event, under the read lock.
 * Does not consume @skb. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1918
/* Hand one complete HCI packet to the transport driver.
 * Takes ownership of @skb: frees it and returns -ENODEV when the
 * device back-pointer is gone, otherwise returns the driver's
 * ->send() result.  In promiscuous mode a timestamped copy is first
 * mirrored to the HCI sockets. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to monitoring sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1943
1944/* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param) and
 * queue it on hdev->cmd_q; the command tasklet performs the actual
 * transmit.  Returns 0 or -ENOMEM if the skb allocation fails. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Wire format: little-endian opcode followed by parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init so the
	 * init sequence can resume after its completion event */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
1981/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the most recently sent
 * command, or NULL when no command is pending or its opcode does not
 * match @opcode.  The pointer aliases hdev->sent_cmd's data; it stays
 * valid only until the next command is sent. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Opcode is stored little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1998
1999/* Send ACL data */
/* Prepend an ACL data header to @skb: the 12-bit connection handle
 * packed with the 4-bit PB/BC flags, plus the payload length, both
 * little-endian. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is pushed */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2011
/* Queue an ACL data packet (possibly pre-fragmented via frag_list) on
 * the connection's data queue and kick the TX tasklet.  For BR/EDR
 * devices the ACL header carries the connection handle; otherwise
 * (AMP) the logical link handle from @chan is used. */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT in place of the
		 * first fragment's packet-boundary flags */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuations always use
			 * conn->handle, while the first fragment may have
			 * used chan->ll_handle — confirm for AMP links */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2063
2064/* Send SCO data */
/* Queue one SCO data packet on the connection's data queue and kick
 * the TX tasklet.  A SCO header (handle + length) is prepended; note
 * that hdr.dlen is 8 bits, so skb->len must fit the SCO MTU. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2086
2087/* ---- HCI TX task (outgoing data) ---- */
2088
2089/* HCI Connection scheduler */
2090static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2091{
2092 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02002093 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 int num = 0, min = ~0;
2095 struct list_head *p;
2096
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002097 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 * added and removed with TX task disabled. */
2099 list_for_each(p, &h->list) {
2100 struct hci_conn *c;
2101 c = list_entry(p, struct hci_conn, list);
2102
Marcel Holtmann769be972008-07-14 20:13:49 +02002103 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002105
2106 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2107 continue;
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109 num++;
2110
2111 if (c->sent < min) {
2112 min = c->sent;
2113 conn = c;
2114 }
2115 }
2116
2117 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002118 int cnt, q;
2119
2120 switch (conn->type) {
2121 case ACL_LINK:
2122 cnt = hdev->acl_cnt;
2123 break;
2124 case SCO_LINK:
2125 case ESCO_LINK:
2126 cnt = hdev->sco_cnt;
2127 break;
2128 case LE_LINK:
2129 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2130 break;
2131 default:
2132 cnt = 0;
2133 BT_ERR("Unknown link type");
2134 }
2135
2136 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 *quote = q ? q : 1;
2138 } else
2139 *quote = 0;
2140
2141 BT_DBG("conn %p quote %d", conn, *quote);
2142 return conn;
2143}
2144
Ville Tervobae1f5d2011-02-10 22:38:53 -03002145static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146{
2147 struct hci_conn_hash *h = &hdev->conn_hash;
2148 struct list_head *p;
2149 struct hci_conn *c;
2150
Ville Tervobae1f5d2011-02-10 22:38:53 -03002151 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152
2153 /* Kill stalled connections */
2154 list_for_each(p, &h->list) {
2155 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002156 if (c->type == type && c->sent) {
2157 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 hdev->name, batostr(&c->dst));
2159 hci_acl_disconn(c, 0x13);
2160 }
2161 }
2162}
2163
/* Drain queued ACL data while the controller has buffer credits.
 * Supports both packet-based flow control (each frame costs one
 * credit) and block-based flow control (cost = number of data blocks
 * the frame occupies). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (hdev->acl_cnt <= 0 &&
			time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt > 0 &&
		(conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
			int count = 1;	/* credits consumed by this frame */

			BT_DBG("skb %p len %d", skb, skb->len);

			if (hdev->flow_ctl_mode ==
				HCI_BLOCK_BASED_FLOW_CTL_MODE)
				/* Calculate count of blocks used by
				 * this packet
				 */
				count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
					hdev->data_block_len) + 1;

			/* NOTE(review): on this path the dequeued skb is
			 * neither sent nor re-queued — confirm the packet
			 * is not silently dropped when credits run short */
			if (count > hdev->acl_cnt)
				return;

			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt -= count;
			quote -= count;

			conn->sent += count;
		}
	}
}
2210
2211/* Schedule SCO */
2212static inline void hci_sched_sco(struct hci_dev *hdev)
2213{
2214 struct hci_conn *conn;
2215 struct sk_buff *skb;
2216 int quote;
2217
2218 BT_DBG("%s", hdev->name);
2219
2220 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2221 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2222 BT_DBG("skb %p len %d", skb, skb->len);
2223 hci_send_frame(skb);
2224
2225 conn->sent++;
2226 if (conn->sent == ~0)
2227 conn->sent = 0;
2228 }
2229 }
2230}
2231
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002232static inline void hci_sched_esco(struct hci_dev *hdev)
2233{
2234 struct hci_conn *conn;
2235 struct sk_buff *skb;
2236 int quote;
2237
2238 BT_DBG("%s", hdev->name);
2239
2240 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2241 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2242 BT_DBG("skb %p len %d", skb, skb->len);
2243 hci_send_frame(skb);
2244
2245 conn->sent++;
2246 if (conn->sent == ~0)
2247 conn->sent = 0;
2248 }
2249 }
2250}
2251
/* Drain queued LE data.  Controllers with a dedicated LE buffer pool
 * (le_pkts != 0) spend le_cnt credits; otherwise LE traffic shares the
 * ACL pool, and the remaining count is written back accordingly. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
			time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}

	/* Return the unused credits to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2285
/* TX tasklet body: run each per-link-type scheduler, then flush any
 * raw (unknown type) packets queued by user space.  Runs under the
 * read side of hci_task_lock so protocol (un)registration is
 * excluded while packets are dispatched. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2312
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002313/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314
2315/* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, resolve
 * the connection from the 12-bit handle, and pass the payload to the
 * L2CAP protocol handler.  Frees @skb when no handler consumes it. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the wire field into handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol; the handler takes ownership */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2354
2355/* SCO data packet */
/* Handle an incoming SCO data packet: strip the SCO header, resolve
 * the connection from the handle, and pass the payload to the SCO
 * protocol handler.  Frees @skb when no handler consumes it. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol; the handler takes ownership */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2390
/* RX tasklet body: drain hdev->rx_q, mirroring packets to promiscuous
 * sockets, discarding everything in raw mode, dropping data packets
 * during controller init, and finally dispatching by packet type.
 * Runs under the read side of hci_task_lock. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2445
/* Command tasklet body: if the controller has command credits, send
 * the next queued command.  A clone is kept in hdev->sent_cmd so the
 * completion handler can inspect the parameters, and cmd_timer is
 * armed to detect a controller that never responds.  If the clone
 * allocation fails the command is put back and the tasklet retried. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously retained command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473
2474module_param(enable_smp, bool, 0644);
2475MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");