blob: a27358f54161f70d52136420dddaff36bca1f27a [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Mat Martineauf058a442011-08-26 09:33:32 -070055#define AUTO_OFF_TIMEOUT 2000
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Brian Gixa68668b2011-08-11 15:49:36 -070063static int enable_smp = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064
Linus Torvalds1da177e2005-04-16 15:20:36 -070065/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073/* AMP Manager event callbacks */
74LIST_HEAD(amp_mgr_cb_list);
75DEFINE_RWLOCK(amp_mgr_cb_list_lock);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077/* HCI protocols */
78#define HCI_MAX_PROTO 2
79struct hci_proto *hci_proto[HCI_MAX_PROTO];
80
81/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080082static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Brian Gix6e4531c2011-10-28 16:12:08 -0700196 memset(&hdev->features, 0, sizeof(hdev->features));
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198}
199
200static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
201{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200202 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800204 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200205 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100217 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218 }
219 skb_queue_purge(&hdev->driver_init);
220
221 /* Mandatory initialization */
222
223 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300224 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
225 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 /* Set default HCI Flow Control Mode */
234 if (hdev->dev_type == HCI_BREDR)
235 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
236 else
237 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
238
239 /* Read HCI Flow Control Mode */
240 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
241
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
246 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248#if 0
249 /* Host buffer size */
250 {
251 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700254 cp.acl_max_pkt = cpu_to_le16(0xffff);
255 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200256 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258#endif
259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 if (hdev->dev_type == HCI_BREDR) {
261 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 /* Read Local Supported Features */
264 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 /* Read BD Address */
267 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 /* Read Class of Device */
270 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 /* Read Local Name */
273 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275 /* Read Voice Setting */
276 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 /* Optional initialization */
279 /* Clear Event Filters */
280 flt_type = HCI_FLT_CLEAR_ALL;
281 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 /* Connection accept timeout ~20 secs */
284 param = cpu_to_le16(0x7d00);
285 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
286
287 bacpy(&cp.bdaddr, BDADDR_ANY);
288 cp.delete_all = 1;
289 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
290 sizeof(cp), &cp);
291 } else {
292 /* AMP initialization */
293 /* Connection accept timeout ~5 secs */
294 param = cpu_to_le16(0x1f40);
295 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
296
297 /* Read AMP Info */
298 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300}
301
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300302static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
303{
304 BT_DBG("%s", hdev->name);
305
306 /* Read LE buffer size */
307 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
308}
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __u8 scan = opt;
313
314 BT_DBG("%s %x", hdev->name, scan);
315
316 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200317 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318}
319
320static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
321{
322 __u8 auth = opt;
323
324 BT_DBG("%s %x", hdev->name, auth);
325
326 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200327 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328}
329
330static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
331{
332 __u8 encrypt = opt;
333
334 BT_DBG("%s %x", hdev->name, encrypt);
335
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200336 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200337 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338}
339
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200340static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
341{
342 __le16 policy = cpu_to_le16(opt);
343
Marcel Holtmanna418b892008-11-30 12:17:28 +0100344 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200345
346 /* Default link policy */
347 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
348}
349
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900350/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 * Device is held on return. */
352struct hci_dev *hci_dev_get(int index)
353{
354 struct hci_dev *hdev = NULL;
355 struct list_head *p;
356
357 BT_DBG("%d", index);
358
359 if (index < 0)
360 return NULL;
361
362 read_lock(&hci_dev_list_lock);
363 list_for_each(p, &hci_dev_list) {
364 struct hci_dev *d = list_entry(p, struct hci_dev, list);
365 if (d->id == index) {
366 hdev = hci_dev_hold(d);
367 break;
368 }
369 }
370 read_unlock(&hci_dev_list_lock);
371 return hdev;
372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375/* ---- Inquiry support ---- */
376static void inquiry_cache_flush(struct hci_dev *hdev)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *next = cache->list, *e;
380
381 BT_DBG("cache %p", cache);
382
383 cache->list = NULL;
384 while ((e = next)) {
385 next = e->next;
386 kfree(e);
387 }
388}
389
390struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
391{
392 struct inquiry_cache *cache = &hdev->inq_cache;
393 struct inquiry_entry *e;
394
395 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
396
397 for (e = cache->list; e; e = e->next)
398 if (!bacmp(&e->data.bdaddr, bdaddr))
399 break;
400 return e;
401}
402
403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
404{
405 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200406 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
408 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
409
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200410 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
411 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200413 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
414 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416
417 ie->next = cache->list;
418 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 }
420
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200421 memcpy(&ie->data, data, sizeof(*data));
422 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 cache->timestamp = jiffies;
424}
425
426static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
427{
428 struct inquiry_cache *cache = &hdev->inq_cache;
429 struct inquiry_info *info = (struct inquiry_info *) buf;
430 struct inquiry_entry *e;
431 int copied = 0;
432
433 for (e = cache->list; e && copied < num; e = e->next, copied++) {
434 struct inquiry_data *data = &e->data;
435 bacpy(&info->bdaddr, &data->bdaddr);
436 info->pscan_rep_mode = data->pscan_rep_mode;
437 info->pscan_period_mode = data->pscan_period_mode;
438 info->pscan_mode = data->pscan_mode;
439 memcpy(info->dev_class, data->dev_class, 3);
440 info->clock_offset = data->clock_offset;
441 info++;
442 }
443
444 BT_DBG("cache %p, copied %d", cache, copied);
445 return copied;
446}
447
448static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
449{
450 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
451 struct hci_cp_inquiry cp;
452
453 BT_DBG("%s", hdev->name);
454
455 if (test_bit(HCI_INQUIRY, &hdev->flags))
456 return;
457
458 /* Start Inquiry */
459 memcpy(&cp.lap, &ir->lap, 3);
460 cp.length = ir->length;
461 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200462 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463}
464
465int hci_inquiry(void __user *arg)
466{
467 __u8 __user *ptr = arg;
468 struct hci_inquiry_req ir;
469 struct hci_dev *hdev;
470 int err = 0, do_inquiry = 0, max_rsp;
471 long timeo;
472 __u8 *buf;
473
474 if (copy_from_user(&ir, ptr, sizeof(ir)))
475 return -EFAULT;
476
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200477 hdev = hci_dev_get(ir.dev_id);
478 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 return -ENODEV;
480
481 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900482 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200483 inquiry_cache_empty(hdev) ||
484 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 inquiry_cache_flush(hdev);
486 do_inquiry = 1;
487 }
488 hci_dev_unlock_bh(hdev);
489
Marcel Holtmann04837f62006-07-03 10:02:33 +0200490 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200491
492 if (do_inquiry) {
493 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
494 if (err < 0)
495 goto done;
496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 /* for unlimited number of responses we will use buffer with 255 entries */
499 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
500
501 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
502 * copy it to the user space.
503 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100504 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200505 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 err = -ENOMEM;
507 goto done;
508 }
509
510 hci_dev_lock_bh(hdev);
511 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
512 hci_dev_unlock_bh(hdev);
513
514 BT_DBG("num_rsp %d", ir.num_rsp);
515
516 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
517 ptr += sizeof(ir);
518 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
519 ir.num_rsp))
520 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900521 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 err = -EFAULT;
523
524 kfree(buf);
525
526done:
527 hci_dev_put(hdev);
528 return err;
529}
530
531/* ---- HCI ioctl helpers ---- */
532
533int hci_dev_open(__u16 dev)
534{
535 struct hci_dev *hdev;
536 int ret = 0;
537
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200538 hdev = hci_dev_get(dev);
539 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540 return -ENODEV;
541
542 BT_DBG("%s %p", hdev->name, hdev);
543
544 hci_req_lock(hdev);
545
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200546 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
547 ret = -ERFKILL;
548 goto done;
549 }
550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 if (test_bit(HCI_UP, &hdev->flags)) {
552 ret = -EALREADY;
553 goto done;
554 }
555
556 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
557 set_bit(HCI_RAW, &hdev->flags);
558
559 if (hdev->open(hdev)) {
560 ret = -EIO;
561 goto done;
562 }
563
564 if (!test_bit(HCI_RAW, &hdev->flags)) {
565 atomic_set(&hdev->cmd_cnt, 1);
566 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200567 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568
Marcel Holtmann04837f62006-07-03 10:02:33 +0200569 ret = __hci_request(hdev, hci_init_req, 0,
570 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300573 ret = __hci_request(hdev, hci_le_init_req, 0,
574 msecs_to_jiffies(HCI_INIT_TIMEOUT));
575
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576 clear_bit(HCI_INIT, &hdev->flags);
577 }
578
579 if (!ret) {
580 hci_dev_hold(hdev);
581 set_bit(HCI_UP, &hdev->flags);
582 hci_notify(hdev, HCI_DEV_UP);
Peter Krystad1fc44072011-08-30 15:38:12 -0700583 if (!test_bit(HCI_SETUP, &hdev->flags) &&
584 hdev->dev_type == HCI_BREDR)
Johan Hedberg5add6af2010-12-16 10:00:37 +0200585 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900586 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 /* Init failed, cleanup */
588 tasklet_kill(&hdev->rx_task);
589 tasklet_kill(&hdev->tx_task);
590 tasklet_kill(&hdev->cmd_task);
591
592 skb_queue_purge(&hdev->cmd_q);
593 skb_queue_purge(&hdev->rx_q);
594
595 if (hdev->flush)
596 hdev->flush(hdev);
597
598 if (hdev->sent_cmd) {
599 kfree_skb(hdev->sent_cmd);
600 hdev->sent_cmd = NULL;
601 }
602
603 hdev->close(hdev);
604 hdev->flags = 0;
605 }
606
607done:
608 hci_req_unlock(hdev);
609 hci_dev_put(hdev);
610 return ret;
611}
612
613static int hci_dev_do_close(struct hci_dev *hdev)
614{
Mat Martineau4106b992011-11-18 15:26:21 -0800615 unsigned long keepflags = 0;
616
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 BT_DBG("%s %p", hdev->name, hdev);
618
619 hci_req_cancel(hdev, ENODEV);
620 hci_req_lock(hdev);
621
622 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300623 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700624 hci_req_unlock(hdev);
625 return 0;
626 }
627
628 /* Kill RX and TX tasks */
629 tasklet_kill(&hdev->rx_task);
630 tasklet_kill(&hdev->tx_task);
631
632 hci_dev_lock_bh(hdev);
633 inquiry_cache_flush(hdev);
634 hci_conn_hash_flush(hdev);
635 hci_dev_unlock_bh(hdev);
636
637 hci_notify(hdev, HCI_DEV_DOWN);
638
639 if (hdev->flush)
640 hdev->flush(hdev);
641
642 /* Reset device */
643 skb_queue_purge(&hdev->cmd_q);
644 atomic_set(&hdev->cmd_cnt, 1);
645 if (!test_bit(HCI_RAW, &hdev->flags)) {
646 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200647 __hci_request(hdev, hci_reset_req, 0,
648 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 clear_bit(HCI_INIT, &hdev->flags);
650 }
651
652 /* Kill cmd task */
653 tasklet_kill(&hdev->cmd_task);
654
655 /* Drop queues */
656 skb_queue_purge(&hdev->rx_q);
657 skb_queue_purge(&hdev->cmd_q);
658 skb_queue_purge(&hdev->raw_q);
659
660 /* Drop last sent command */
661 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300662 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 kfree_skb(hdev->sent_cmd);
664 hdev->sent_cmd = NULL;
665 }
666
667 /* After this point our queues are empty
668 * and no tasks are scheduled. */
669 hdev->close(hdev);
670
Peter Krystad1fc44072011-08-30 15:38:12 -0700671 if (hdev->dev_type == HCI_BREDR)
672 mgmt_powered(hdev->id, 0);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200673
Mat Martineau4106b992011-11-18 15:26:21 -0800674 /* Clear only non-persistent flags */
675 if (test_bit(HCI_MGMT, &hdev->flags))
676 set_bit(HCI_MGMT, &keepflags);
677 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
678 set_bit(HCI_LINK_KEYS, &keepflags);
679 if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
680 set_bit(HCI_DEBUG_KEYS, &keepflags);
681
682 hdev->flags = keepflags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683
684 hci_req_unlock(hdev);
685
686 hci_dev_put(hdev);
687 return 0;
688}
689
690int hci_dev_close(__u16 dev)
691{
692 struct hci_dev *hdev;
693 int err;
694
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200695 hdev = hci_dev_get(dev);
696 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697 return -ENODEV;
698 err = hci_dev_do_close(hdev);
699 hci_dev_put(hdev);
700 return err;
701}
702
703int hci_dev_reset(__u16 dev)
704{
705 struct hci_dev *hdev;
706 int ret = 0;
707
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200708 hdev = hci_dev_get(dev);
709 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700710 return -ENODEV;
711
712 hci_req_lock(hdev);
713 tasklet_disable(&hdev->tx_task);
714
715 if (!test_bit(HCI_UP, &hdev->flags))
716 goto done;
717
718 /* Drop queues */
719 skb_queue_purge(&hdev->rx_q);
720 skb_queue_purge(&hdev->cmd_q);
721
722 hci_dev_lock_bh(hdev);
723 inquiry_cache_flush(hdev);
724 hci_conn_hash_flush(hdev);
725 hci_dev_unlock_bh(hdev);
726
727 if (hdev->flush)
728 hdev->flush(hdev);
729
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900730 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300731 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732
733 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200734 ret = __hci_request(hdev, hci_reset_req, 0,
735 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700736
737done:
738 tasklet_enable(&hdev->tx_task);
739 hci_req_unlock(hdev);
740 hci_dev_put(hdev);
741 return ret;
742}
743
744int hci_dev_reset_stat(__u16 dev)
745{
746 struct hci_dev *hdev;
747 int ret = 0;
748
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200749 hdev = hci_dev_get(dev);
750 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751 return -ENODEV;
752
753 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
754
755 hci_dev_put(hdev);
756
757 return ret;
758}
759
760int hci_dev_cmd(unsigned int cmd, void __user *arg)
761{
762 struct hci_dev *hdev;
763 struct hci_dev_req dr;
764 int err = 0;
765
766 if (copy_from_user(&dr, arg, sizeof(dr)))
767 return -EFAULT;
768
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200769 hdev = hci_dev_get(dr.dev_id);
770 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 return -ENODEV;
772
773 switch (cmd) {
774 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200775 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 break;
778
779 case HCISETENCRYPT:
780 if (!lmp_encrypt_capable(hdev)) {
781 err = -EOPNOTSUPP;
782 break;
783 }
784
785 if (!test_bit(HCI_AUTH, &hdev->flags)) {
786 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200787 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
788 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 if (err)
790 break;
791 }
792
Marcel Holtmann04837f62006-07-03 10:02:33 +0200793 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
794 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 break;
796
797 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200798 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
799 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 break;
801
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200802 case HCISETLINKPOL:
803 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
804 msecs_to_jiffies(HCI_INIT_TIMEOUT));
805 break;
806
807 case HCISETLINKMODE:
808 hdev->link_mode = ((__u16) dr.dev_opt) &
809 (HCI_LM_MASTER | HCI_LM_ACCEPT);
810 break;
811
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 case HCISETPTYPE:
813 hdev->pkt_type = (__u16) dr.dev_opt;
814 break;
815
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200817 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
818 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 break;
820
821 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200822 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
823 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824 break;
825
826 default:
827 err = -EINVAL;
828 break;
829 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200830
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 hci_dev_put(hdev);
832 return err;
833}
834
/* IOCTL helper: copy the list of registered HCI devices to userspace.
 * @arg points to a struct hci_dev_list_req whose dev_num field caps how
 * many entries the caller's buffer can hold.  Returns 0 or -errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and anything that would make the kernel buffer
	 * exceed two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Userspace is now aware of this adapter: cancel the
		 * auto-power-off timer armed at registration time. */
		hci_del_off_timer(hdev);

		/* Devices not managed through the mgmt interface keep
		 * the legacy pairable behaviour. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
884
/* IOCTL helper: fill a struct hci_dev_info snapshot for one device.
 * Returns 0, -EFAULT on bad user pointer, or -ENODEV for unknown id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference; released via hci_dev_put() below. */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto-power-off. */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type (low nibble) and device type (high nibble). */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
925
926/* ---- Interface to HCI drivers ---- */
927
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200928static int hci_rfkill_set_block(void *data, bool blocked)
929{
930 struct hci_dev *hdev = data;
931
932 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
933
934 if (!blocked)
935 return 0;
936
937 hci_dev_do_close(hdev);
938
939 return 0;
940}
941
/* Only the set_block hook is needed; rfkill core handles the rest. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
945
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946/* Alloc HCI device */
947struct hci_dev *hci_alloc_dev(void)
948{
949 struct hci_dev *hdev;
950
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200951 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 if (!hdev)
953 return NULL;
954
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955 skb_queue_head_init(&hdev->driver_init);
956
957 return hdev;
958}
959EXPORT_SYMBOL(hci_alloc_dev);
960
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any frames the driver queued before registration. */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
970
/* Work item: bring the adapter up.  Queued at registration so the
 * device powers on without blocking the registering context. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* -EALREADY just means the device is already up; anything else
	 * is a real failure and we give up here. */
	err = hci_dev_open(hdev->id);
	if (err && err != -EALREADY)
		return;

	/* BR/EDR devices still in auto-off mode get shut down again
	 * unless userspace claims them before the timeout. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First power-on finishes setup: announce the index to mgmt. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
991
/* Work item: take the adapter down (counterpart of hci_power_on). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
1000
/* Timer callback: the auto-off grace period expired.  Runs in softirq
 * context, so the actual close is deferred to the power_off work. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
1011
/* Cancel a pending auto-power-off: called once userspace shows
 * interest in the device so it is not shut down underneath it. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1019
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001020int hci_uuids_clear(struct hci_dev *hdev)
1021{
1022 struct list_head *p, *n;
1023
1024 list_for_each_safe(p, n, &hdev->uuids) {
1025 struct bt_uuid *uuid;
1026
1027 uuid = list_entry(p, struct bt_uuid, list);
1028
1029 list_del(p);
1030 kfree(uuid);
1031 }
1032
1033 return 0;
1034}
1035
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001036int hci_link_keys_clear(struct hci_dev *hdev)
1037{
1038 struct list_head *p, *n;
1039
1040 list_for_each_safe(p, n, &hdev->link_keys) {
1041 struct link_key *key;
1042
1043 key = list_entry(p, struct link_key, list);
1044
1045 list_del(p);
1046 kfree(key);
1047 }
1048
1049 return 0;
1050}
1051
1052struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1053{
1054 struct list_head *p;
1055
1056 list_for_each(p, &hdev->link_keys) {
1057 struct link_key *k;
1058
1059 k = list_entry(p, struct link_key, list);
1060
1061 if (bacmp(bdaddr, &k->bdaddr) == 0)
1062 return k;
1063 }
1064
1065 return NULL;
1066}
1067
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001068struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1069{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001070 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001071
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072 list_for_each(p, &hdev->link_keys) {
1073 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001074 struct key_master_id *id;
1075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001076 k = list_entry(p, struct link_key, list);
1077
Brian Gixcf956772011-10-20 15:18:51 -07001078 if (k->key_type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001079 continue;
1080
1081 if (k->dlen != sizeof(*id))
1082 continue;
1083
1084 id = (void *) &k->data;
1085 if (id->ediv == ediv &&
1086 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1087 return k;
1088 }
1089
1090 return NULL;
1091}
1092EXPORT_SYMBOL(hci_find_ltk);
1093
1094struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1095 bdaddr_t *bdaddr, u8 type)
1096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001098
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001099 list_for_each(p, &hdev->link_keys) {
1100 struct link_key *k;
1101
1102 k = list_entry(p, struct link_key, list);
1103
Brian Gixcf956772011-10-20 15:18:51 -07001104 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001105 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001106 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001107
1108 return NULL;
1109}
1110EXPORT_SYMBOL(hci_find_link_key_type);
1111
/* Store (or update) the BR/EDR link key for @bdaddr and, for a freshly
 * negotiated key (@new_key), notify the mgmt interface whether it
 * resulted in a bonding.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	/* Reuse an existing entry for this address if there is one;
	 * 0xff marks "no previous key" for the bonding decision below. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->key_type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->key_type = type;
	key->pin_len = pin_len;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	/* Store the link key persistently if one of the following is true:
	 * 1. the remote side is using dedicated bonding since in that case
	 *    also the local requirements are set to dedicated bonding
	 * 2. the local side had dedicated bonding as a requirement
	 * 3. this is a legacy link key
	 * 4. this is a changed combination key and there was a previously
	 *    stored one
	 * If none of the above match only keep the link key around for
	 * this connection and set the temporary flag for the device.
	 */

	if (conn) {
		if ((conn->remote_auth > 0x01) ||
			(conn->auth_initiator && conn->auth_type > 0x01) ||
			(key->key_type < 0x03) ||
			(key->key_type == 0x06 && old_key_type != 0xff))
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* A changed combination key (0x06) keeps its previous stored
	 * type rather than overwriting it. */
	if (type == 0x06)
		key->key_type = old_key_type;

	return 0;
}
1168
/* Store (or update) an LE Long Term Key for @bdaddr, keeping the
 * master identification (ediv/rand) in the key's trailing data area.
 * Notifies mgmt when @new_key is set.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 addr_type, u8 key_size, u8 auth,
			__le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
						batostr(bdaddr), addr_type);

	/* Only one LTK per address: reuse the entry if it exists,
	 * otherwise allocate with room for the key_master_id payload. */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	key->addr_type = addr_type;
	memcpy(key->val, ltk, sizeof(key->val));
	key->key_type = KEY_TYPE_LTK;
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* Low bit of auth indicates an authenticated (bonded) key. */
	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1207
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001208int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209{
1210 struct link_key *key;
1211
1212 key = hci_find_link_key(hdev, bdaddr);
1213 if (!key)
1214 return -ENOENT;
1215
1216 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1217
1218 list_del(&key->list);
1219 kfree(key);
1220
1221 return 0;
1222}
1223
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The expected completion never arrived: allow the next queued
	 * command to be sent and drop any stuck reset state. */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1234
Szymon Janc2763eda2011-03-22 13:12:22 +01001235struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1236 bdaddr_t *bdaddr)
1237{
1238 struct oob_data *data;
1239
1240 list_for_each_entry(data, &hdev->remote_oob_data, list)
1241 if (bacmp(bdaddr, &data->bdaddr) == 0)
1242 return data;
1243
1244 return NULL;
1245}
1246
1247int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1248{
1249 struct oob_data *data;
1250
1251 data = hci_find_remote_oob_data(hdev, bdaddr);
1252 if (!data)
1253 return -ENOENT;
1254
1255 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1256
1257 list_del(&data->list);
1258 kfree(data);
1259
1260 return 0;
1261}
1262
1263int hci_remote_oob_data_clear(struct hci_dev *hdev)
1264{
1265 struct oob_data *data, *n;
1266
1267 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1268 list_del(&data->list);
1269 kfree(data);
1270 }
1271
1272 return 0;
1273}
1274
/* adv_timer callback: expire the LE advertising-report cache.
 * Runs in softirq context. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;

	hci_adv_entries_clear(hdev);
}
1281
1282int hci_adv_entries_clear(struct hci_dev *hdev)
1283{
1284 struct list_head *p, *n;
1285
Brian Gixa68668b2011-08-11 15:49:36 -07001286 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001287 write_lock_bh(&hdev->adv_entries_lock);
1288
1289 list_for_each_safe(p, n, &hdev->adv_entries) {
1290 struct adv_entry *entry;
1291
1292 entry = list_entry(p, struct adv_entry, list);
1293
1294 list_del(p);
1295 kfree(entry);
1296 }
1297
1298 write_unlock_bh(&hdev->adv_entries_lock);
1299
1300 return 0;
1301}
1302
1303struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1304{
1305 struct list_head *p;
1306 struct adv_entry *res = NULL;
1307
Brian Gixa68668b2011-08-11 15:49:36 -07001308 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001309 read_lock_bh(&hdev->adv_entries_lock);
1310
1311 list_for_each(p, &hdev->adv_entries) {
1312 struct adv_entry *entry;
1313
1314 entry = list_entry(p, struct adv_entry, list);
1315
1316 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1317 res = entry;
1318 goto out;
1319 }
1320 }
1321out:
1322 read_unlock_bh(&hdev->adv_entries_lock);
1323 return res;
1324}
1325
1326static inline int is_connectable_adv(u8 evt_type)
1327{
1328 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1329 return 1;
1330
1331 return 0;
1332}
1333
Szymon Janc2763eda2011-03-22 13:12:22 +01001334int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1335 u8 *randomizer)
1336{
1337 struct oob_data *data;
1338
1339 data = hci_find_remote_oob_data(hdev, bdaddr);
1340
1341 if (!data) {
1342 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1343 if (!data)
1344 return -ENOMEM;
1345
1346 bacpy(&data->bdaddr, bdaddr);
1347 list_add(&data->list, &hdev->remote_oob_data);
1348 }
1349
1350 memcpy(data->hash, hash, sizeof(data->hash));
1351 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1352
1353 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1354
1355 return 0;
1356}
1357
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001358int hci_add_adv_entry(struct hci_dev *hdev,
1359 struct hci_ev_le_advertising_info *ev)
1360{
1361 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001362 u8 flags = 0;
1363 int i;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001364
Brian Gixa68668b2011-08-11 15:49:36 -07001365 BT_DBG("");
1366
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001367 if (!is_connectable_adv(ev->evt_type))
1368 return -EINVAL;
1369
Brian Gixfdd38922011-09-28 16:23:48 -07001370 if (ev->data && ev->length) {
1371 for (i = 0; (i + 2) < ev->length; i++)
1372 if (ev->data[i+1] == 0x01) {
1373 flags = ev->data[i+2];
1374 BT_DBG("flags: %2.2x", flags);
1375 break;
1376 } else {
1377 i += ev->data[i];
1378 }
1379 }
1380
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001381 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001382 /* Only new entries should be added to adv_entries. So, if
1383 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001384 if (entry) {
1385 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001386 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001387 }
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001388
1389 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1390 if (!entry)
1391 return -ENOMEM;
1392
1393 bacpy(&entry->bdaddr, &ev->bdaddr);
1394 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001395 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001396
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001397 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001398 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001400
1401 return 0;
1402}
1403
/* Allocate the AES-ECB transform used for SMP, or an ERR_PTR when SMP
 * is disabled via the enable_smp parameter (callers check IS_ERR). */
static struct crypto_blkcipher *alloc_cypher(void)
{
	if (enable_smp)
		return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);

	return ERR_PTR(-ENOTSUPP);
}
1411
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the minimal set of callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* BR/EDR adapters start numbering at hci0, others at hci1. */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Conservative defaults until the controller is initialized. */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP crypto is optional: failure is logged, not fatal. */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill is also optional; run without it on failure. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on asynchronously; it turns itself back off
	 * unless userspace claims it (see hci_power_on/hci_auto_off). */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1531
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new users find it. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-assembled packets (kfree_skb(NULL) is safe). */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for BR/EDR devices that completed setup
	 * and are not mid-initialization. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
		!test_bit(HCI_SETUP, &hdev->flags) &&
		hdev->dev_type == HCI_BREDR)
		mgmt_index_removed(hdev->id);

	/* tfm may be an ERR_PTR when SMP was disabled; see alloc_cypher. */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop pending timers before tearing down the workqueue. */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Flush all per-device caches under the device lock. */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the registration reference taken in hci_register_dev. */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1583
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Just notify registered listeners; the driver does the work. */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1591
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Just notify registered listeners; the driver does the work. */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1599
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Drop frames for unknown devices and for devices that are
	 * neither up nor being initialized. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1623
/* Core packet reassembler: append up to @count bytes of @data to the
 * partial packet in hdev->reassembly[@index], allocating a fresh skb
 * when a new packet of @type begins.  Completed frames are handed to
 * hci_recv_frame().  Returns the number of input bytes left
 * unconsumed (>= 0) or a negative error.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First fragment: size the skb for the largest frame of
		 * this type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * stage (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check it against the skb's room. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1732
Marcel Holtmannef222012007-07-11 06:42:04 +02001733int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1734{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301735 int rem = 0;
1736
Marcel Holtmannef222012007-07-11 06:42:04 +02001737 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1738 return -EILSEQ;
1739
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001740 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001741 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301742 if (rem < 0)
1743 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001744
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301745 data += (count - rem);
1746 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001747 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001748
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301749 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001750}
1751EXPORT_SYMBOL(hci_recv_fragment);
1752
Suraj Sumangala99811512010-07-14 13:02:19 +05301753#define STREAM_REASSEMBLY 0
1754
1755int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1756{
1757 int type;
1758 int rem = 0;
1759
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001760 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301761 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1762
1763 if (!skb) {
1764 struct { char type; } *pkt;
1765
1766 /* Start of the frame */
1767 pkt = data;
1768 type = pkt->type;
1769
1770 data++;
1771 count--;
1772 } else
1773 type = bt_cb(skb)->pkt_type;
1774
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001775 rem = hci_reassembly(hdev, type, data, count,
1776 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301777 if (rem < 0)
1778 return rem;
1779
1780 data += (count - rem);
1781 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001782 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301783
1784 return rem;
1785}
1786EXPORT_SYMBOL(hci_recv_stream_fragment);
1787
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788/* ---- Interface to upper protocols ---- */
1789
1790/* Register/Unregister protocols.
1791 * hci_task_lock is used to ensure that no tasks are running. */
1792int hci_register_proto(struct hci_proto *hp)
1793{
1794 int err = 0;
1795
1796 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1797
1798 if (hp->id >= HCI_MAX_PROTO)
1799 return -EINVAL;
1800
1801 write_lock_bh(&hci_task_lock);
1802
1803 if (!hci_proto[hp->id])
1804 hci_proto[hp->id] = hp;
1805 else
1806 err = -EEXIST;
1807
1808 write_unlock_bh(&hci_task_lock);
1809
1810 return err;
1811}
1812EXPORT_SYMBOL(hci_register_proto);
1813
1814int hci_unregister_proto(struct hci_proto *hp)
1815{
1816 int err = 0;
1817
1818 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1819
1820 if (hp->id >= HCI_MAX_PROTO)
1821 return -EINVAL;
1822
1823 write_lock_bh(&hci_task_lock);
1824
1825 if (hci_proto[hp->id])
1826 hci_proto[hp->id] = NULL;
1827 else
1828 err = -ENOENT;
1829
1830 write_unlock_bh(&hci_task_lock);
1831
1832 return err;
1833}
1834EXPORT_SYMBOL(hci_unregister_proto);
1835
/* Add a callback block to the global hci_cb_list (used for connection
 * event notifications).  Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1847
/* Remove a callback block from hci_cb_list.  The caller must have
 * registered it previously; always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1859
/* Add an AMP manager callback block to amp_mgr_cb_list so it receives
 * AMP command-complete/status/event notifications.  Always succeeds. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1871
/* Remove an AMP manager callback block from amp_mgr_cb_list.
 * Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1883
1884void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
1885 struct sk_buff *skb)
1886{
1887 struct amp_mgr_cb *cb;
1888
1889 BT_DBG("opcode 0x%x", opcode);
1890
1891 read_lock_bh(&amp_mgr_cb_list_lock);
1892 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1893 if (cb->amp_cmd_complete_event)
1894 cb->amp_cmd_complete_event(hdev, opcode, skb);
1895 }
1896 read_unlock_bh(&amp_mgr_cb_list_lock);
1897}
1898
1899void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
1900{
1901 struct amp_mgr_cb *cb;
1902
1903 BT_DBG("opcode 0x%x, status %d", opcode, status);
1904
1905 read_lock_bh(&amp_mgr_cb_list_lock);
1906 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1907 if (cb->amp_cmd_status_event)
1908 cb->amp_cmd_status_event(hdev, opcode, status);
1909 }
1910 read_unlock_bh(&amp_mgr_cb_list_lock);
1911}
1912
1913void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
1914 struct sk_buff *skb)
1915{
1916 struct amp_mgr_cb *cb;
1917
1918 BT_DBG("ev_code 0x%x", ev_code);
1919
1920 read_lock_bh(&amp_mgr_cb_list_lock);
1921 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1922 if (cb->amp_event)
1923 cb->amp_event(hdev, ev_code, skb);
1924 }
1925 read_unlock_bh(&amp_mgr_cb_list_lock);
1926}
1927
/* Hand a single HCI packet to the transport driver.
 *
 * skb->dev must carry the owning hci_dev.  The skb is consumed in all
 * cases: freed here if hdev is missing, otherwise ownership passes to
 * hdev->send().  Returns -ENODEV or the driver's return value.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to monitoring (raw HCI) sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1952
/* Send HCI command */
/* Build an HCI command packet (header + parameters) and queue it on
 * hdev->cmd_q; the command tasklet pushes it to the driver.
 *
 * @opcode: combined OGF/OCF opcode in host byte order
 * @plen:   parameter length in bytes; NOTE(review): plen is __u32 but
 *          hdr->plen is __u8, so values > 255 would silently truncate —
 *          callers are expected to stay within the HCI spec limit
 * @param:  parameter buffer copied into the skb (ignored when !plen)
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last issued command so the
	 * init state machine can sequence on its completion. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
/* Get data from the previously sent command */
/* Return a pointer to the parameter area of the most recently sent HCI
 * command, but only if its opcode matches @opcode; NULL otherwise.
 * The returned pointer aliases hdev->sent_cmd — do not free it. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2007
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb.
 * The payload length is captured before skb_push so dlen reflects the
 * data only, not the header itself. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Pack the 12-bit connection handle with the PB/BC flag bits */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2020
/* Queue an ACL data packet (possibly pre-fragmented via frag_list) on
 * the connection's data queue and kick the TX tasklet.
 *
 * For BR/EDR the ACL header carries conn->handle; otherwise (AMP) the
 * logical-link handle from @chan is used for the first fragment.
 * Continuation fragments are re-flagged as ACL_CONT.
 */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Subsequent fragments are continuations: clear the
		 * packet-boundary bits and mark ACL_CONT. */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuations always use
			 * conn->handle, even when the first fragment used
			 * chan->ll_handle — confirm intended for AMP. */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2072
/* Send SCO data */
/* Prepend a SCO header (handle, length) to @skb, queue it on the
 * connection's data queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Header fields are filled before pushing room for them */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2095
2096/* ---- HCI TX task (outgoing data) ---- */
2097
2098/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets that
 * has data queued, and compute its fair-share quota (@quote) from the
 * controller's free buffer count divided by the number of eligible
 * connections (minimum 1).  Returns NULL and *quote = 0 if nothing is
 * ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	/* min = ~0 is -1 as int; the c->sent < min comparison promotes it
	 * to the maximum unsigned value, so the first candidate wins. */
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only schedule fully established (or configuring) links */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Each link type draws from its own buffer pool; LE falls
		 * back to the ACL pool when the controller advertises no
		 * dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2153
Ville Tervobae1f5d2011-02-10 22:38:53 -03002154static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155{
2156 struct hci_conn_hash *h = &hdev->conn_hash;
2157 struct list_head *p;
2158 struct hci_conn *c;
2159
Ville Tervobae1f5d2011-02-10 22:38:53 -03002160 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
2162 /* Kill stalled connections */
2163 list_for_each(p, &h->list) {
2164 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002165 if (c->type == type && c->sent) {
2166 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 hdev->name, batostr(&c->dst));
2168 hci_acl_disconn(c, 0x13);
2169 }
2170 }
2171}
2172
2173static inline void hci_sched_acl(struct hci_dev *hdev)
2174{
2175 struct hci_conn *conn;
2176 struct sk_buff *skb;
2177 int quote;
2178
2179 BT_DBG("%s", hdev->name);
2180
2181 if (!test_bit(HCI_RAW, &hdev->flags)) {
2182 /* ACL tx timeout must be longer than maximum
2183 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002184 if (hdev->acl_cnt <= 0 &&
2185 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002186 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 }
2188
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002189 while (hdev->acl_cnt > 0 &&
2190 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2191 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2192 int count = 1;
2193
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002195
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002196 if (hdev->flow_ctl_mode ==
2197 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2198 /* Calculate count of blocks used by
2199 * this packet
2200 */
2201 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2202 hdev->data_block_len) + 1;
2203
2204 if (count > hdev->acl_cnt)
2205 return;
2206
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002207 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002208
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 hci_send_frame(skb);
2210 hdev->acl_last_tx = jiffies;
2211
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002212 hdev->acl_cnt -= count;
2213 quote -= count;
2214
2215 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 }
2217 }
2218}
2219
2220/* Schedule SCO */
2221static inline void hci_sched_sco(struct hci_dev *hdev)
2222{
2223 struct hci_conn *conn;
2224 struct sk_buff *skb;
2225 int quote;
2226
2227 BT_DBG("%s", hdev->name);
2228
2229 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2230 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2231 BT_DBG("skb %p len %d", skb, skb->len);
2232 hci_send_frame(skb);
2233
2234 conn->sent++;
2235 if (conn->sent == ~0)
2236 conn->sent = 0;
2237 }
2238 }
2239}
2240
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002241static inline void hci_sched_esco(struct hci_dev *hdev)
2242{
2243 struct hci_conn *conn;
2244 struct sk_buff *skb;
2245 int quote;
2246
2247 BT_DBG("%s", hdev->name);
2248
2249 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2250 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2251 BT_DBG("skb %p len %d", skb, skb->len);
2252 hci_send_frame(skb);
2253
2254 conn->sent++;
2255 if (conn->sent == ~0)
2256 conn->sent = 0;
2257 }
2258 }
2259}
2260
/* LE scheduler: drain queued LE data.  Controllers without a dedicated
 * LE buffer pool (le_pkts == 0) share the ACL pool, so the working
 * credit count is written back to the matching counter at the end. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Choose the credit pool: dedicated LE buffers, or shared ACL */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2294
/* TX tasklet body: runs each per-link-type scheduler in priority order
 * (ACL, SCO, eSCO, LE) and then flushes raw/unknown-type packets
 * straight to the driver.  hci_task_lock (read) excludes concurrent
 * protocol (un)registration. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2321
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002322/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324/* ACL data packet */
/* Handle an incoming ACL data packet: strip the ACL header, resolve the
 * connection handle and pass the payload to the L2CAP layer.  The skb
 * is consumed — either by the protocol handler or freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Split the 16-bit field into the flag bits and the 12-bit handle */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2363
2364/* SCO data packet */
/* Handle an incoming SCO data packet: strip the SCO header, resolve the
 * connection handle and pass the payload to the SCO protocol layer.
 * The skb is consumed — by the handler or freed here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2399
/* RX tasklet body: drain hdev->rx_q and dispatch each packet by type.
 * Copies go to promiscuous sockets first; raw-mode devices drop
 * everything; data packets are discarded while the device is still in
 * HCI_INIT.  hci_task_lock (read) excludes protocol (un)registration. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2454
/* Command tasklet body: if a command credit is available, dequeue the
 * next queued command, keep a clone in hdev->sent_cmd (so the reply
 * handler can inspect it) and send it, arming the command timeout.
 * If the clone allocation fails the command is requeued and the tasklet
 * rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd clone (kfree_skb(NULL) is ok) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002482
/* Runtime toggle for Security Manager Protocol (LE pairing) support;
 * 0644 makes it root-writable via /sys/module parameters. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");