blob: 97f0775a125600b55838379b3aa72b2ed1d50d26 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Timeout for the automatic power-off timer, in milliseconds */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet bodies, defined later in this file */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* NOTE(review): presumably serializes the rx/tx/cmd tasklets against
 * protocol (un)registration — confirm against hci_register_proto() */
static DEFINE_RWLOCK(hci_task_lock);

/* Module toggle; name suggests Security Manager Protocol support — enabled
 * by default */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
/* Register @nb to be notified of HCI device events (see hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}
90
/* Remove a notifier previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
95
/* Broadcast @event for @hdev to every registered HCI notifier. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
100
101/* ---- HCI requests ---- */
102
/* Called from the event path when command @cmd finishes with @result;
 * wakes any synchronous __hci_request() waiter. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
/* Execute request and wait for completion.  Caller must hold the request
 * lock (see hci_request()).  The completion side fills req_status/
 * req_result and wakes req_wait_q (hci_req_complete()/hci_req_cancel()). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        /* Queue ourselves on the wait queue before firing the request so
         * an immediate completion cannot be missed. */
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* Map the HCI status code to a negative errno */
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                /* Timer expired without completion or cancellation */
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
/* Request handler: issue HCI_Reset and forget the cached feature mask. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        memset(&hdev->features, 0, sizeof(hdev->features));
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
199
/* Request handler run at device-open time: queue the standard HCI
 * initialization sequence — driver-provided commands first, then the
 * mandatory common commands, then BR/EDR- or AMP-specific setup. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands: drain command skbs the driver queued on
         * driver_init and push them onto the regular command queue. */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset, unless the controller quirks it away */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Set default HCI Flow Control Mode: packet-based for BR/EDR,
         * block-based otherwise (AMP) */
        if (hdev->dev_type == HCI_BREDR)
                hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
        else
                hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;

        /* Read HCI Flow Control Mode */
        hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Data Block Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        if (hdev->dev_type == HCI_BREDR) {
                /* BR-EDR initialization */

                /* Read Local Supported Features */
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

                /* Read BD Address */
                hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

                /* Read Class of Device */
                hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

                /* Read Local Name */
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

                /* Read Voice Setting */
                hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

                /* Optional initialization */
                /* Clear Event Filters */
                flt_type = HCI_FLT_CLEAR_ALL;
                hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

                /* Connection accept timeout ~20 secs */
                param = cpu_to_le16(0x7d00);
                hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

                /* Drop all stored link keys in the controller */
                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 1;
                hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
                                sizeof(cp), &cp);
        } else {
                /* AMP initialization */
                /* Connection accept timeout ~5 secs */
                param = cpu_to_le16(0x1f40);
                hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

                /* Read AMP Info */
                hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
        }
}
301
/* Request handler for LE-capable controllers: query LE buffer sizes. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __u8 scan = opt;
313
314 BT_DBG("%s %x", hdev->name, scan);
315
316 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200317 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318}
319
320static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
321{
322 __u8 auth = opt;
323
324 BT_DBG("%s %x", hdev->name, auth);
325
326 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200327 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328}
329
330static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
331{
332 __u8 encrypt = opt;
333
334 BT_DBG("%s %x", hdev->name, encrypt);
335
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200336 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200337 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338}
339
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200340static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
341{
342 __le16 policy = cpu_to_le16(opt);
343
Marcel Holtmanna418b892008-11-30 12:17:28 +0100344 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200345
346 /* Default link policy */
347 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
348}
349
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900350/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 * Device is held on return. */
352struct hci_dev *hci_dev_get(int index)
353{
354 struct hci_dev *hdev = NULL;
355 struct list_head *p;
356
357 BT_DBG("%d", index);
358
359 if (index < 0)
360 return NULL;
361
362 read_lock(&hci_dev_list_lock);
363 list_for_each(p, &hci_dev_list) {
364 struct hci_dev *d = list_entry(p, struct hci_dev, list);
365 if (d->id == index) {
366 hdev = hci_dev_hold(d);
367 break;
368 }
369 }
370 read_unlock(&hci_dev_list_lock);
371 return hdev;
372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375/* ---- Inquiry support ---- */
376static void inquiry_cache_flush(struct hci_dev *hdev)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *next = cache->list, *e;
380
381 BT_DBG("cache %p", cache);
382
383 cache->list = NULL;
384 while ((e = next)) {
385 next = e->next;
386 kfree(e);
387 }
388}
389
390struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
391{
392 struct inquiry_cache *cache = &hdev->inq_cache;
393 struct inquiry_entry *e;
394
395 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
396
397 for (e = cache->list; e; e = e->next)
398 if (!bacmp(&e->data.bdaddr, bdaddr))
399 break;
400 return e;
401}
402
403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
404{
405 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200406 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
408 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
409
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200410 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
411 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200413 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
414 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416
417 ie->next = cache->list;
418 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 }
420
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200421 memcpy(&ie->data, data, sizeof(*data));
422 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 cache->timestamp = jiffies;
424}
425
426static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
427{
428 struct inquiry_cache *cache = &hdev->inq_cache;
429 struct inquiry_info *info = (struct inquiry_info *) buf;
430 struct inquiry_entry *e;
431 int copied = 0;
432
433 for (e = cache->list; e && copied < num; e = e->next, copied++) {
434 struct inquiry_data *data = &e->data;
435 bacpy(&info->bdaddr, &data->bdaddr);
436 info->pscan_rep_mode = data->pscan_rep_mode;
437 info->pscan_period_mode = data->pscan_period_mode;
438 info->pscan_mode = data->pscan_mode;
439 memcpy(info->dev_class, data->dev_class, 3);
440 info->clock_offset = data->clock_offset;
441 info++;
442 }
443
444 BT_DBG("cache %p, copied %d", cache, copied);
445 return copied;
446}
447
448static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
449{
450 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
451 struct hci_cp_inquiry cp;
452
453 BT_DBG("%s", hdev->name);
454
455 if (test_bit(HCI_INQUIRY, &hdev->flags))
456 return;
457
458 /* Start Inquiry */
459 memcpy(&cp.lap, &ir->lap, 3);
460 cp.length = ir->length;
461 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200462 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463}
464
/* HCIINQUIRY ioctl: run a fresh inquiry (or reuse the cache) and copy
 * the discovered devices back to user space. */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Only start a new inquiry when the cache is stale or empty, or
         * the caller explicitly requested a flush. */
        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        /* ir.length is in 1.28 s units per HCI; 2000 ms per unit here */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the updated request header, then the entries */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
530
531/* ---- HCI ioctl helpers ---- */
532
/* HCIDEVUP ioctl: open the driver, run the HCI init sequence (unless
 * the device is raw) and bring the interface up.  On init failure the
 * partially-started device is torn back down. */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        /* Refuse to power on while rfkill-blocked */
        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                /* Tell mgmt only for BR/EDR controllers past setup */
                if (!test_bit(HCI_SETUP, &hdev->flags) &&
                                hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock_bh(hdev);
                        mgmt_powered(hdev->id, 1);
                        hci_dev_unlock_bh(hdev);
                }
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
615
/* Bring a controller down: stop tasklets, flush caches/queues, reset
 * the hardware, close the driver and drop the open-time reference.
 * Persistent management flags survive across the close. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        unsigned long keepflags = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                /* Already down: just stop the command timer */
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->dev_type == HCI_BREDR) {
                hci_dev_lock_bh(hdev);
                mgmt_powered(hdev->id, 0);
                hci_dev_unlock_bh(hdev);
        }

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear only non-persistent flags */
        if (test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_MGMT, &keepflags);
        if (test_bit(HCI_LINK_KEYS, &hdev->flags))
                set_bit(HCI_LINK_KEYS, &keepflags);
        if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
                set_bit(HCI_DEBUG_KEYS, &keepflags);

        hdev->flags = keepflags;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
695
696int hci_dev_close(__u16 dev)
697{
698 struct hci_dev *hdev;
699 int err;
700
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200701 hdev = hci_dev_get(dev);
702 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 return -ENODEV;
704 err = hci_dev_do_close(hdev);
705 hci_dev_put(hdev);
706 return err;
707}
708
/* HCIDEVRESET ioctl: flush queues, caches and connections, then issue
 * an HCI reset — without taking the interface down. */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        /* Keep the TX tasklet quiet while state is being wiped */
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Restore credit counters to their post-reset state */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
749
750int hci_dev_reset_stat(__u16 dev)
751{
752 struct hci_dev *hdev;
753 int ret = 0;
754
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200755 hdev = hci_dev_get(dev);
756 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 return -ENODEV;
758
759 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
760
761 hci_dev_put(hdev);
762
763 return ret;
764}
765
/* Handler for the simple HCISET* ioctls that change one attribute of a
 * device.  @arg points to a struct hci_dev_req in user space. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* dev_opt carries two packed __u16s: second halfword is
                 * the MTU, first is the packet count */
                hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
840
/* HCIGETDEVLIST ioctl: copy the ids and flag words of up to dev_num
 * registered devices into the caller's struct hci_dev_list_req. */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Bound the kernel scratch buffer to two pages */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                /* Cancel the pending auto-off timer for this device */
                hci_del_off_timer(hdev);

                /* Devices not managed via mgmt stay pairable */
                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        /* Shrink the copy to the number of devices actually found */
        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
890
891int hci_get_dev_info(void __user *arg)
892{
893 struct hci_dev *hdev;
894 struct hci_dev_info di;
895 int err = 0;
896
897 if (copy_from_user(&di, arg, sizeof(di)))
898 return -EFAULT;
899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200900 hdev = hci_dev_get(di.dev_id);
901 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 return -ENODEV;
903
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200904 hci_del_off_timer(hdev);
905
Johan Hedbergc542a062011-01-26 13:11:03 +0200906 if (!test_bit(HCI_MGMT, &hdev->flags))
907 set_bit(HCI_PAIRABLE, &hdev->flags);
908
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 strcpy(di.name, hdev->name);
910 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +0100911 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 di.flags = hdev->flags;
913 di.pkt_type = hdev->pkt_type;
914 di.acl_mtu = hdev->acl_mtu;
915 di.acl_pkts = hdev->acl_pkts;
916 di.sco_mtu = hdev->sco_mtu;
917 di.sco_pkts = hdev->sco_pkts;
918 di.link_policy = hdev->link_policy;
919 di.link_mode = hdev->link_mode;
920
921 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
922 memcpy(&di.features, &hdev->features, sizeof(di.features));
923
924 if (copy_to_user(arg, &di, sizeof(di)))
925 err = -EFAULT;
926
927 hci_dev_put(hdev);
928
929 return err;
930}
931
932/* ---- Interface to HCI drivers ---- */
933
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200934static int hci_rfkill_set_block(void *data, bool blocked)
935{
936 struct hci_dev *hdev = data;
937
938 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
939
940 if (!blocked)
941 return 0;
942
943 hci_dev_do_close(hdev);
944
945 return 0;
946}
947
948static const struct rfkill_ops hci_rfkill_ops = {
949 .set_block = hci_rfkill_set_block,
950};
951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952/* Alloc HCI device */
953struct hci_dev *hci_alloc_dev(void)
954{
955 struct hci_dev *hdev;
956
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200957 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 if (!hdev)
959 return NULL;
960
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961 skb_queue_head_init(&hdev->driver_init);
962
963 return hdev;
964}
965EXPORT_SYMBOL(hci_alloc_dev);
966
967/* Free HCI device */
968void hci_free_dev(struct hci_dev *hdev)
969{
970 skb_queue_purge(&hdev->driver_init);
971
Marcel Holtmanna91f2e32006-07-03 10:02:41 +0200972 /* will free via device release */
973 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974}
975EXPORT_SYMBOL(hci_free_dev);
976
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200977static void hci_power_on(struct work_struct *work)
978{
979 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Inga Stotland5029fc22011-09-12 15:22:52 -0700980 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200981
982 BT_DBG("%s", hdev->name);
983
Inga Stotland5029fc22011-09-12 15:22:52 -0700984 err = hci_dev_open(hdev->id);
985 if (err && err != -EALREADY)
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200986 return;
987
Peter Krystad1fc44072011-08-30 15:38:12 -0700988 if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
989 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200990 mod_timer(&hdev->off_timer,
991 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
992
Peter Krystad1fc44072011-08-30 15:38:12 -0700993 if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
994 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200995 mgmt_index_added(hdev->id);
996}
997
998static void hci_power_off(struct work_struct *work)
999{
1000 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
1001
1002 BT_DBG("%s", hdev->name);
1003
1004 hci_dev_close(hdev->id);
1005}
1006
1007static void hci_auto_off(unsigned long data)
1008{
1009 struct hci_dev *hdev = (struct hci_dev *) data;
1010
1011 BT_DBG("%s", hdev->name);
1012
1013 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1014
1015 queue_work(hdev->workqueue, &hdev->power_off);
1016}
1017
1018void hci_del_off_timer(struct hci_dev *hdev)
1019{
1020 BT_DBG("%s", hdev->name);
1021
1022 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1023 del_timer(&hdev->off_timer);
1024}
1025
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001026int hci_uuids_clear(struct hci_dev *hdev)
1027{
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->uuids) {
1031 struct bt_uuid *uuid;
1032
1033 uuid = list_entry(p, struct bt_uuid, list);
1034
1035 list_del(p);
1036 kfree(uuid);
1037 }
1038
1039 return 0;
1040}
1041
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001042int hci_link_keys_clear(struct hci_dev *hdev)
1043{
1044 struct list_head *p, *n;
1045
1046 list_for_each_safe(p, n, &hdev->link_keys) {
1047 struct link_key *key;
1048
1049 key = list_entry(p, struct link_key, list);
1050
1051 list_del(p);
1052 kfree(key);
1053 }
1054
1055 return 0;
1056}
1057
1058struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1059{
1060 struct list_head *p;
1061
1062 list_for_each(p, &hdev->link_keys) {
1063 struct link_key *k;
1064
1065 k = list_entry(p, struct link_key, list);
1066
1067 if (bacmp(bdaddr, &k->bdaddr) == 0)
1068 return k;
1069 }
1070
1071 return NULL;
1072}
1073
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001074struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1075{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001076 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001077
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001078 list_for_each(p, &hdev->link_keys) {
1079 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001080 struct key_master_id *id;
1081
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001082 k = list_entry(p, struct link_key, list);
1083
Brian Gixcf956772011-10-20 15:18:51 -07001084 if (k->key_type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001085 continue;
1086
1087 if (k->dlen != sizeof(*id))
1088 continue;
1089
1090 id = (void *) &k->data;
1091 if (id->ediv == ediv &&
1092 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1093 return k;
1094 }
1095
1096 return NULL;
1097}
1098EXPORT_SYMBOL(hci_find_ltk);
1099
1100struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1101 bdaddr_t *bdaddr, u8 type)
1102{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001103 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001105 list_for_each(p, &hdev->link_keys) {
1106 struct link_key *k;
1107
1108 k = list_entry(p, struct link_key, list);
1109
Brian Gixcf956772011-10-20 15:18:51 -07001110 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001111 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001112 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001113
1114 return NULL;
1115}
1116EXPORT_SYMBOL(hci_find_link_key_type);
1117
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1119 u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001120{
1121 struct link_key *key, *old_key;
Brian Gixa68668b2011-08-11 15:49:36 -07001122 struct hci_conn *conn;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001123 u8 old_key_type;
Brian Gixa68668b2011-08-11 15:49:36 -07001124 u8 bonded = 0;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001125
1126 old_key = hci_find_link_key(hdev, bdaddr);
1127 if (old_key) {
Brian Gixcf956772011-10-20 15:18:51 -07001128 old_key_type = old_key->key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001129 key = old_key;
1130 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001131 old_key_type = 0xff;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001132 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1133 if (!key)
1134 return -ENOMEM;
1135 list_add(&key->list, &hdev->link_keys);
1136 }
1137
1138 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1139
1140 bacpy(&key->bdaddr, bdaddr);
1141 memcpy(key->val, val, 16);
Brian Gixa68668b2011-08-11 15:49:36 -07001142 key->auth = 0x01;
Brian Gixcf956772011-10-20 15:18:51 -07001143 key->key_type = type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001144 key->pin_len = pin_len;
1145
Brian Gixa68668b2011-08-11 15:49:36 -07001146 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301147 /* Store the link key persistently if one of the following is true:
1148 * 1. the remote side is using dedicated bonding since in that case
1149 * also the local requirements are set to dedicated bonding
1150 * 2. the local side had dedicated bonding as a requirement
1151 * 3. this is a legacy link key
1152 * 4. this is a changed combination key and there was a previously
1153 * stored one
1154 * If none of the above match only keep the link key around for
1155 * this connection and set the temporary flag for the device.
1156 */
Brian Gixa68668b2011-08-11 15:49:36 -07001157
Brian Gixdfdd9362011-08-18 09:58:02 -07001158 if (conn) {
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301159 if ((conn->remote_auth > 0x01) ||
1160 (conn->auth_initiator && conn->auth_type > 0x01) ||
Brian Gixcf956772011-10-20 15:18:51 -07001161 (key->key_type < 0x03) ||
1162 (key->key_type == 0x06 && old_key_type != 0xff))
Brian Gixdfdd9362011-08-18 09:58:02 -07001163 bonded = 1;
1164 }
Brian Gixa68668b2011-08-11 15:49:36 -07001165
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001166 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001167 mgmt_new_key(hdev->id, key, bonded);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001168
1169 if (type == 0x06)
Brian Gixcf956772011-10-20 15:18:51 -07001170 key->key_type = old_key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001171
1172 return 0;
1173}
1174
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001175int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
Brian Gixcf956772011-10-20 15:18:51 -07001176 u8 addr_type, u8 key_size, u8 auth,
1177 __le16 ediv, u8 rand[8], u8 ltk[16])
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001178{
1179 struct link_key *key, *old_key;
1180 struct key_master_id *id;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001181
Brian Gixcf956772011-10-20 15:18:51 -07001182 BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
1183 batostr(bdaddr), addr_type);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001185 old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001186 if (old_key) {
1187 key = old_key;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001188 } else {
1189 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1190 if (!key)
1191 return -ENOMEM;
1192 list_add(&key->list, &hdev->link_keys);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001193 }
1194
1195 key->dlen = sizeof(*id);
1196
1197 bacpy(&key->bdaddr, bdaddr);
Brian Gixcf956772011-10-20 15:18:51 -07001198 key->addr_type = addr_type;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001199 memcpy(key->val, ltk, sizeof(key->val));
Brian Gixcf956772011-10-20 15:18:51 -07001200 key->key_type = KEY_TYPE_LTK;
Vinicius Costa Gomes1fa2de32011-07-08 18:31:45 -03001201 key->pin_len = key_size;
Brian Gixa68668b2011-08-11 15:49:36 -07001202 key->auth = auth;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001203
1204 id = (void *) &key->data;
1205 id->ediv = ediv;
1206 memcpy(id->rand, rand, sizeof(id->rand));
1207
1208 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001209 mgmt_new_key(hdev->id, key, auth & 0x01);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001210
1211 return 0;
1212}
1213
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001214int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1215{
1216 struct link_key *key;
1217
1218 key = hci_find_link_key(hdev, bdaddr);
1219 if (!key)
1220 return -ENOENT;
1221
1222 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1223
1224 list_del(&key->list);
1225 kfree(key);
1226
1227 return 0;
1228}
1229
Ville Tervo6bd32322011-02-16 16:32:41 +02001230/* HCI command timer function */
1231static void hci_cmd_timer(unsigned long arg)
1232{
1233 struct hci_dev *hdev = (void *) arg;
1234
1235 BT_ERR("%s command tx timeout", hdev->name);
1236 atomic_set(&hdev->cmd_cnt, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001237 clear_bit(HCI_RESET, &hdev->flags);
Ville Tervo6bd32322011-02-16 16:32:41 +02001238 tasklet_schedule(&hdev->cmd_task);
1239}
1240
Szymon Janc2763eda2011-03-22 13:12:22 +01001241struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1242 bdaddr_t *bdaddr)
1243{
1244 struct oob_data *data;
1245
1246 list_for_each_entry(data, &hdev->remote_oob_data, list)
1247 if (bacmp(bdaddr, &data->bdaddr) == 0)
1248 return data;
1249
1250 return NULL;
1251}
1252
1253int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1254{
1255 struct oob_data *data;
1256
1257 data = hci_find_remote_oob_data(hdev, bdaddr);
1258 if (!data)
1259 return -ENOENT;
1260
1261 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1262
1263 list_del(&data->list);
1264 kfree(data);
1265
1266 return 0;
1267}
1268
1269int hci_remote_oob_data_clear(struct hci_dev *hdev)
1270{
1271 struct oob_data *data, *n;
1272
1273 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1274 list_del(&data->list);
1275 kfree(data);
1276 }
1277
1278 return 0;
1279}
1280
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001281static void hci_adv_clear(unsigned long arg)
1282{
1283 struct hci_dev *hdev = (void *) arg;
1284
1285 hci_adv_entries_clear(hdev);
1286}
1287
1288int hci_adv_entries_clear(struct hci_dev *hdev)
1289{
1290 struct list_head *p, *n;
1291
Brian Gixa68668b2011-08-11 15:49:36 -07001292 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001293 write_lock_bh(&hdev->adv_entries_lock);
1294
1295 list_for_each_safe(p, n, &hdev->adv_entries) {
1296 struct adv_entry *entry;
1297
1298 entry = list_entry(p, struct adv_entry, list);
1299
1300 list_del(p);
1301 kfree(entry);
1302 }
1303
1304 write_unlock_bh(&hdev->adv_entries_lock);
1305
1306 return 0;
1307}
1308
1309struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1310{
1311 struct list_head *p;
1312 struct adv_entry *res = NULL;
1313
Brian Gixa68668b2011-08-11 15:49:36 -07001314 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315 read_lock_bh(&hdev->adv_entries_lock);
1316
1317 list_for_each(p, &hdev->adv_entries) {
1318 struct adv_entry *entry;
1319
1320 entry = list_entry(p, struct adv_entry, list);
1321
1322 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1323 res = entry;
1324 goto out;
1325 }
1326 }
1327out:
1328 read_unlock_bh(&hdev->adv_entries_lock);
1329 return res;
1330}
1331
1332static inline int is_connectable_adv(u8 evt_type)
1333{
1334 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1335 return 1;
1336
1337 return 0;
1338}
1339
Szymon Janc2763eda2011-03-22 13:12:22 +01001340int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1341 u8 *randomizer)
1342{
1343 struct oob_data *data;
1344
1345 data = hci_find_remote_oob_data(hdev, bdaddr);
1346
1347 if (!data) {
1348 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1349 if (!data)
1350 return -ENOMEM;
1351
1352 bacpy(&data->bdaddr, bdaddr);
1353 list_add(&data->list, &hdev->remote_oob_data);
1354 }
1355
1356 memcpy(data->hash, hash, sizeof(data->hash));
1357 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1358
1359 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1360
1361 return 0;
1362}
1363
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001364int hci_add_adv_entry(struct hci_dev *hdev,
1365 struct hci_ev_le_advertising_info *ev)
1366{
1367 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001368 u8 flags = 0;
1369 int i;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001370
Brian Gixa68668b2011-08-11 15:49:36 -07001371 BT_DBG("");
1372
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001373 if (!is_connectable_adv(ev->evt_type))
1374 return -EINVAL;
1375
Brian Gixfdd38922011-09-28 16:23:48 -07001376 if (ev->data && ev->length) {
1377 for (i = 0; (i + 2) < ev->length; i++)
1378 if (ev->data[i+1] == 0x01) {
1379 flags = ev->data[i+2];
1380 BT_DBG("flags: %2.2x", flags);
1381 break;
1382 } else {
1383 i += ev->data[i];
1384 }
1385 }
1386
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001387 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001388 /* Only new entries should be added to adv_entries. So, if
1389 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001390 if (entry) {
1391 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001392 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001393 }
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001394
1395 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1396 if (!entry)
1397 return -ENOMEM;
1398
1399 bacpy(&entry->bdaddr, &ev->bdaddr);
1400 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001401 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001404 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001406
1407 return 0;
1408}
1409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001410static struct crypto_blkcipher *alloc_cypher(void)
1411{
1412 if (enable_smp)
1413 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1414
1415 return ERR_PTR(-ENOTSUPP);
1416}
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418/* Register HCI device */
1419int hci_register_dev(struct hci_dev *hdev)
1420{
1421 struct list_head *head = &hci_dev_list, *p;
Peter Krystad462bf762011-09-19 14:20:20 -07001422 int i, id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001424 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1425 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
1427 if (!hdev->open || !hdev->close || !hdev->destruct)
1428 return -EINVAL;
1429
Peter Krystad462bf762011-09-19 14:20:20 -07001430 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1431
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432 write_lock_bh(&hci_dev_list_lock);
1433
1434 /* Find first available device id */
1435 list_for_each(p, &hci_dev_list) {
1436 if (list_entry(p, struct hci_dev, list)->id != id)
1437 break;
1438 head = p; id++;
1439 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001440
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441 sprintf(hdev->name, "hci%d", id);
1442 hdev->id = id;
1443 list_add(&hdev->list, head);
1444
1445 atomic_set(&hdev->refcnt, 1);
1446 spin_lock_init(&hdev->lock);
1447
1448 hdev->flags = 0;
1449 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001450 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001452 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453
Marcel Holtmann04837f62006-07-03 10:02:33 +02001454 hdev->idle_timeout = 0;
1455 hdev->sniff_max_interval = 800;
1456 hdev->sniff_min_interval = 80;
1457
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001458 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1460 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1461
1462 skb_queue_head_init(&hdev->rx_q);
1463 skb_queue_head_init(&hdev->cmd_q);
1464 skb_queue_head_init(&hdev->raw_q);
1465
Ville Tervo6bd32322011-02-16 16:32:41 +02001466 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1467
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301468 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001469 hdev->reassembly[i] = NULL;
1470
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001472 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
1474 inquiry_cache_init(hdev);
1475
1476 hci_conn_hash_init(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001477 hci_chan_list_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
David Millerea4bd8b2010-07-30 21:54:49 -07001479 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001480
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001481 INIT_LIST_HEAD(&hdev->uuids);
1482
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001483 INIT_LIST_HEAD(&hdev->link_keys);
1484
Szymon Janc2763eda2011-03-22 13:12:22 +01001485 INIT_LIST_HEAD(&hdev->remote_oob_data);
1486
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001487 INIT_LIST_HEAD(&hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001488 rwlock_init(&hdev->adv_entries_lock);
1489 setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001490
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001491 INIT_WORK(&hdev->power_on, hci_power_on);
1492 INIT_WORK(&hdev->power_off, hci_power_off);
1493 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1494
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1496
1497 atomic_set(&hdev->promisc, 0);
1498
1499 write_unlock_bh(&hci_dev_list_lock);
1500
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001501 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1502 if (!hdev->workqueue)
1503 goto nomem;
1504
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001505 hdev->tfm = alloc_cypher();
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -03001506 if (IS_ERR(hdev->tfm))
1507 BT_INFO("Failed to load transform for ecb(aes): %ld",
1508 PTR_ERR(hdev->tfm));
1509
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 hci_register_sysfs(hdev);
1511
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001512 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1513 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1514 if (hdev->rfkill) {
1515 if (rfkill_register(hdev->rfkill) < 0) {
1516 rfkill_destroy(hdev->rfkill);
1517 hdev->rfkill = NULL;
1518 }
1519 }
1520
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001521 set_bit(HCI_AUTO_OFF, &hdev->flags);
1522 set_bit(HCI_SETUP, &hdev->flags);
1523 queue_work(hdev->workqueue, &hdev->power_on);
1524
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 hci_notify(hdev, HCI_DEV_REG);
1526
1527 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001528
1529nomem:
1530 write_lock_bh(&hci_dev_list_lock);
1531 list_del(&hdev->list);
1532 write_unlock_bh(&hci_dev_list_lock);
1533
1534 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535}
1536EXPORT_SYMBOL(hci_register_dev);
1537
1538/* Unregister HCI device */
1539int hci_unregister_dev(struct hci_dev *hdev)
1540{
Marcel Holtmannef222012007-07-11 06:42:04 +02001541 int i;
1542
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001543 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545 write_lock_bh(&hci_dev_list_lock);
1546 list_del(&hdev->list);
1547 write_unlock_bh(&hci_dev_list_lock);
1548
1549 hci_dev_do_close(hdev);
1550
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301551 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001552 kfree_skb(hdev->reassembly[i]);
1553
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001554 if (!test_bit(HCI_INIT, &hdev->flags) &&
Peter Krystad1fc44072011-08-30 15:38:12 -07001555 !test_bit(HCI_SETUP, &hdev->flags) &&
Subramanian Srinivasana727a492011-11-30 13:06:07 -08001556 hdev->dev_type == HCI_BREDR) {
1557 hci_dev_lock_bh(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001558 mgmt_index_removed(hdev->id);
Subramanian Srinivasana727a492011-11-30 13:06:07 -08001559 hci_dev_unlock_bh(hdev);
1560 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001561
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -03001562 if (!IS_ERR(hdev->tfm))
1563 crypto_free_blkcipher(hdev->tfm);
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 hci_notify(hdev, HCI_DEV_UNREG);
1566
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001567 if (hdev->rfkill) {
1568 rfkill_unregister(hdev->rfkill);
1569 rfkill_destroy(hdev->rfkill);
1570 }
1571
Dave Young147e2d52008-03-05 18:45:59 -08001572 hci_unregister_sysfs(hdev);
1573
Brian Gix3cd62042012-01-11 15:18:17 -08001574 /* Disable all timers */
Gustavo F. Padovanc6f3c5f2011-02-15 20:22:03 -03001575 hci_del_off_timer(hdev);
Andre Guedes45e600f2011-05-26 16:23:53 -03001576 del_timer(&hdev->adv_timer);
Brian Gix3cd62042012-01-11 15:18:17 -08001577 del_timer(&hdev->cmd_timer);
1578 del_timer(&hdev->disc_timer);
1579 del_timer(&hdev->disc_le_timer);
Gustavo F. Padovanc6f3c5f2011-02-15 20:22:03 -03001580
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001581 destroy_workqueue(hdev->workqueue);
1582
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001583 hci_dev_lock_bh(hdev);
1584 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001585 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001586 hci_link_keys_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01001587 hci_remote_oob_data_clear(hdev);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001588 hci_adv_entries_clear(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001589 hci_dev_unlock_bh(hdev);
1590
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 __hci_dev_put(hdev);
Marcel Holtmannef222012007-07-11 06:42:04 +02001592
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 return 0;
1594}
1595EXPORT_SYMBOL(hci_unregister_dev);
1596
1597/* Suspend HCI device */
1598int hci_suspend_dev(struct hci_dev *hdev)
1599{
1600 hci_notify(hdev, HCI_DEV_SUSPEND);
1601 return 0;
1602}
1603EXPORT_SYMBOL(hci_suspend_dev);
1604
1605/* Resume HCI device */
1606int hci_resume_dev(struct hci_dev *hdev)
1607{
1608 hci_notify(hdev, HCI_DEV_RESUME);
1609 return 0;
1610}
1611EXPORT_SYMBOL(hci_resume_dev);
1612
Marcel Holtmann76bca882009-11-18 00:40:39 +01001613/* Receive frame from HCI drivers */
1614int hci_recv_frame(struct sk_buff *skb)
1615{
1616 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1617 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1618 && !test_bit(HCI_INIT, &hdev->flags))) {
1619 kfree_skb(skb);
1620 return -ENXIO;
1621 }
1622
1623 /* Incomming skb */
1624 bt_cb(skb)->incoming = 1;
1625
1626 /* Time stamp */
1627 __net_timestamp(skb);
1628
1629 /* Queue frame for rx task */
1630 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001631 tasklet_schedule(&hdev->rx_task);
1632
Marcel Holtmann76bca882009-11-18 00:40:39 +01001633 return 0;
1634}
1635EXPORT_SYMBOL(hci_recv_frame);
1636
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301637static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001638 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301639{
1640 int len = 0;
1641 int hlen = 0;
1642 int remain = count;
1643 struct sk_buff *skb;
1644 struct bt_skb_cb *scb;
1645
1646 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1647 index >= NUM_REASSEMBLY)
1648 return -EILSEQ;
1649
1650 skb = hdev->reassembly[index];
1651
1652 if (!skb) {
1653 switch (type) {
1654 case HCI_ACLDATA_PKT:
1655 len = HCI_MAX_FRAME_SIZE;
1656 hlen = HCI_ACL_HDR_SIZE;
1657 break;
1658 case HCI_EVENT_PKT:
1659 len = HCI_MAX_EVENT_SIZE;
1660 hlen = HCI_EVENT_HDR_SIZE;
1661 break;
1662 case HCI_SCODATA_PKT:
1663 len = HCI_MAX_SCO_SIZE;
1664 hlen = HCI_SCO_HDR_SIZE;
1665 break;
1666 }
1667
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001668 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301669 if (!skb)
1670 return -ENOMEM;
1671
1672 scb = (void *) skb->cb;
1673 scb->expect = hlen;
1674 scb->pkt_type = type;
1675
1676 skb->dev = (void *) hdev;
1677 hdev->reassembly[index] = skb;
1678 }
1679
1680 while (count) {
1681 scb = (void *) skb->cb;
1682 len = min(scb->expect, (__u16)count);
1683
1684 memcpy(skb_put(skb, len), data, len);
1685
1686 count -= len;
1687 data += len;
1688 scb->expect -= len;
1689 remain = count;
1690
1691 switch (type) {
1692 case HCI_EVENT_PKT:
1693 if (skb->len == HCI_EVENT_HDR_SIZE) {
1694 struct hci_event_hdr *h = hci_event_hdr(skb);
1695 scb->expect = h->plen;
1696
1697 if (skb_tailroom(skb) < scb->expect) {
1698 kfree_skb(skb);
1699 hdev->reassembly[index] = NULL;
1700 return -ENOMEM;
1701 }
1702 }
1703 break;
1704
1705 case HCI_ACLDATA_PKT:
1706 if (skb->len == HCI_ACL_HDR_SIZE) {
1707 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1708 scb->expect = __le16_to_cpu(h->dlen);
1709
1710 if (skb_tailroom(skb) < scb->expect) {
1711 kfree_skb(skb);
1712 hdev->reassembly[index] = NULL;
1713 return -ENOMEM;
1714 }
1715 }
1716 break;
1717
1718 case HCI_SCODATA_PKT:
1719 if (skb->len == HCI_SCO_HDR_SIZE) {
1720 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1721 scb->expect = h->dlen;
1722
1723 if (skb_tailroom(skb) < scb->expect) {
1724 kfree_skb(skb);
1725 hdev->reassembly[index] = NULL;
1726 return -ENOMEM;
1727 }
1728 }
1729 break;
1730 }
1731
1732 if (scb->expect == 0) {
1733 /* Complete frame */
1734
1735 bt_cb(skb)->pkt_type = type;
1736 hci_recv_frame(skb);
1737
1738 hdev->reassembly[index] = NULL;
1739 return remain;
1740 }
1741 }
1742
1743 return remain;
1744}
1745
Marcel Holtmannef222012007-07-11 06:42:04 +02001746int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1747{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301748 int rem = 0;
1749
Marcel Holtmannef222012007-07-11 06:42:04 +02001750 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1751 return -EILSEQ;
1752
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001753 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001754 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301755 if (rem < 0)
1756 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001757
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301758 data += (count - rem);
1759 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001760 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001761
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301762 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001763}
1764EXPORT_SYMBOL(hci_recv_fragment);
1765
Suraj Sumangala99811512010-07-14 13:02:19 +05301766#define STREAM_REASSEMBLY 0
1767
1768int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1769{
1770 int type;
1771 int rem = 0;
1772
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001773 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301774 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1775
1776 if (!skb) {
1777 struct { char type; } *pkt;
1778
1779 /* Start of the frame */
1780 pkt = data;
1781 type = pkt->type;
1782
1783 data++;
1784 count--;
1785 } else
1786 type = bt_cb(skb)->pkt_type;
1787
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001788 rem = hci_reassembly(hdev, type, data, count,
1789 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301790 if (rem < 0)
1791 return rem;
1792
1793 data += (count - rem);
1794 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001795 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301796
1797 return rem;
1798}
1799EXPORT_SYMBOL(hci_recv_stream_fragment);
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801/* ---- Interface to upper protocols ---- */
1802
1803/* Register/Unregister protocols.
1804 * hci_task_lock is used to ensure that no tasks are running. */
1805int hci_register_proto(struct hci_proto *hp)
1806{
1807 int err = 0;
1808
1809 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1810
1811 if (hp->id >= HCI_MAX_PROTO)
1812 return -EINVAL;
1813
1814 write_lock_bh(&hci_task_lock);
1815
1816 if (!hci_proto[hp->id])
1817 hci_proto[hp->id] = hp;
1818 else
1819 err = -EEXIST;
1820
1821 write_unlock_bh(&hci_task_lock);
1822
1823 return err;
1824}
1825EXPORT_SYMBOL(hci_register_proto);
1826
1827int hci_unregister_proto(struct hci_proto *hp)
1828{
1829 int err = 0;
1830
1831 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1832
1833 if (hp->id >= HCI_MAX_PROTO)
1834 return -EINVAL;
1835
1836 write_lock_bh(&hci_task_lock);
1837
1838 if (hci_proto[hp->id])
1839 hci_proto[hp->id] = NULL;
1840 else
1841 err = -ENOENT;
1842
1843 write_unlock_bh(&hci_task_lock);
1844
1845 return err;
1846}
1847EXPORT_SYMBOL(hci_unregister_proto);
1848
/* Add a HCI callback structure to the global callback list.
 * Always succeeds (returns 0); list is protected by hci_cb_list_lock. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1860
/* Remove a previously registered HCI callback structure.
 * Always succeeds (returns 0); caller must have registered @cb before. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1872
/* Add an AMP manager callback structure to the global AMP callback
 * list.  Always succeeds (returns 0). */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1884
/* Remove a previously registered AMP manager callback structure.
 * Always succeeds (returns 0). */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1896
1897void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
1898 struct sk_buff *skb)
1899{
1900 struct amp_mgr_cb *cb;
1901
1902 BT_DBG("opcode 0x%x", opcode);
1903
1904 read_lock_bh(&amp_mgr_cb_list_lock);
1905 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1906 if (cb->amp_cmd_complete_event)
1907 cb->amp_cmd_complete_event(hdev, opcode, skb);
1908 }
1909 read_unlock_bh(&amp_mgr_cb_list_lock);
1910}
1911
1912void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
1913{
1914 struct amp_mgr_cb *cb;
1915
1916 BT_DBG("opcode 0x%x, status %d", opcode, status);
1917
1918 read_lock_bh(&amp_mgr_cb_list_lock);
1919 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1920 if (cb->amp_cmd_status_event)
1921 cb->amp_cmd_status_event(hdev, opcode, status);
1922 }
1923 read_unlock_bh(&amp_mgr_cb_list_lock);
1924}
1925
1926void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
1927 struct sk_buff *skb)
1928{
1929 struct amp_mgr_cb *cb;
1930
1931 BT_DBG("ev_code 0x%x", ev_code);
1932
1933 read_lock_bh(&amp_mgr_cb_list_lock);
1934 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1935 if (cb->amp_event)
1936 cb->amp_event(hdev, ev_code, skb);
1937 }
1938 read_unlock_bh(&amp_mgr_cb_list_lock);
1939}
1940
/* Hand one complete HCI packet to the transport driver.
 * Consumes the skb: the driver owns it after hdev->send(); it is freed
 * here only on the missing-device error path.
 * Returns the driver's send result, or -ENODEV when skb->dev is unset.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	/* Senders smuggle the owning device through skb->dev. */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to listening HCI sockets. */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1965
/* Send HCI command */
/* Build an HCI command packet (header plus optional parameter block)
 * and queue it on cmd_q; the cmd tasklet performs the transmission.
 *
 * @hdev:   target controller
 * @opcode: command opcode in host byte order (stored little-endian)
 * @plen:   length of @param in bytes
 * @param:  parameter bytes, may be NULL when @plen is 0
 *
 * Returns 0 on success or -ENOMEM if the skb allocation failed.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	/* GFP_ATOMIC: may be called from softirq context. */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last issued opcode so the
	 * init sequence can be driven from command completions. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002
2003/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002004void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005{
2006 struct hci_command_hdr *hdr;
2007
2008 if (!hdev->sent_cmd)
2009 return NULL;
2010
2011 hdr = (void *) hdev->sent_cmd->data;
2012
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002013 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 return NULL;
2015
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002016 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2019}
2020
/* Send ACL data */
/* Prepend an ACL data header to an outgoing skb: the handle field
 * packs the connection handle together with the PB/BC flag bits, and
 * dlen records the payload length present before the push. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length, taken before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2033
/* Queue an ACL frame (possibly fragmented via skb frag_list) on a
 * connection's data queue and kick the TX tasklet.
 *
 * @conn:  owning connection (provides handle and hdev)
 * @chan:  channel whose ll_handle is used on non-BR/EDR controllers
 * @skb:   first (or only) fragment; consumed by this function
 * @flags: ACL packet-boundary/broadcast flags for the first fragment
 */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	/* BR/EDR controllers address by connection handle; otherwise the
	 * channel's logical-link handle is used. */
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an individual ACL packet. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Continuation fragments replace the start flag in the
		 * packet-boundary bits with ACL_CONT. */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2085
/* Send SCO data */
/* Build the SCO header in a stack-local struct, copy it in front of
 * the payload and queue the frame for the TX tasklet.  Consumes @skb. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;	/* length taken before the header push */

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2108
2109/* ---- HCI TX task (outgoing data) ---- */
2110
2111/* HCI Connection scheduler */
2112static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2113{
2114 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02002115 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116 int num = 0, min = ~0;
2117 struct list_head *p;
2118
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002119 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 * added and removed with TX task disabled. */
2121 list_for_each(p, &h->list) {
2122 struct hci_conn *c;
2123 c = list_entry(p, struct hci_conn, list);
2124
Marcel Holtmann769be972008-07-14 20:13:49 +02002125 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002127
2128 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2129 continue;
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 num++;
2132
2133 if (c->sent < min) {
2134 min = c->sent;
2135 conn = c;
2136 }
2137 }
2138
2139 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002140 int cnt, q;
2141
2142 switch (conn->type) {
2143 case ACL_LINK:
2144 cnt = hdev->acl_cnt;
2145 break;
2146 case SCO_LINK:
2147 case ESCO_LINK:
2148 cnt = hdev->sco_cnt;
2149 break;
2150 case LE_LINK:
2151 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2152 break;
2153 default:
2154 cnt = 0;
2155 BT_ERR("Unknown link type");
2156 }
2157
2158 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 *quote = q ? q : 1;
2160 } else
2161 *quote = 0;
2162
2163 BT_DBG("conn %p quote %d", conn, *quote);
2164 return conn;
2165}
2166
Ville Tervobae1f5d2011-02-10 22:38:53 -03002167static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168{
2169 struct hci_conn_hash *h = &hdev->conn_hash;
2170 struct list_head *p;
2171 struct hci_conn *c;
2172
Ville Tervobae1f5d2011-02-10 22:38:53 -03002173 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175 /* Kill stalled connections */
2176 list_for_each(p, &h->list) {
2177 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002178 if (c->type == type && c->sent) {
2179 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 hdev->name, batostr(&c->dst));
2181 hci_acl_disconn(c, 0x13);
2182 }
2183 }
2184}
2185
2186static inline void hci_sched_acl(struct hci_dev *hdev)
2187{
2188 struct hci_conn *conn;
2189 struct sk_buff *skb;
2190 int quote;
2191
2192 BT_DBG("%s", hdev->name);
2193
2194 if (!test_bit(HCI_RAW, &hdev->flags)) {
2195 /* ACL tx timeout must be longer than maximum
2196 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002197 if (hdev->acl_cnt <= 0 &&
2198 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002199 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 }
2201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202 while (hdev->acl_cnt > 0 &&
2203 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2204 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2205 int count = 1;
2206
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002208
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002209 if (hdev->flow_ctl_mode ==
2210 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2211 /* Calculate count of blocks used by
2212 * this packet
2213 */
2214 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2215 hdev->data_block_len) + 1;
2216
2217 if (count > hdev->acl_cnt)
2218 return;
2219
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002220 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002221
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222 hci_send_frame(skb);
2223 hdev->acl_last_tx = jiffies;
2224
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002225 hdev->acl_cnt -= count;
2226 quote -= count;
2227
2228 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 }
2230 }
2231}
2232
2233/* Schedule SCO */
2234static inline void hci_sched_sco(struct hci_dev *hdev)
2235{
2236 struct hci_conn *conn;
2237 struct sk_buff *skb;
2238 int quote;
2239
2240 BT_DBG("%s", hdev->name);
2241
2242 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2243 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2244 BT_DBG("skb %p len %d", skb, skb->len);
2245 hci_send_frame(skb);
2246
2247 conn->sent++;
2248 if (conn->sent == ~0)
2249 conn->sent = 0;
2250 }
2251 }
2252}
2253
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002254static inline void hci_sched_esco(struct hci_dev *hdev)
2255{
2256 struct hci_conn *conn;
2257 struct sk_buff *skb;
2258 int quote;
2259
2260 BT_DBG("%s", hdev->name);
2261
2262 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2263 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2264 BT_DBG("skb %p len %d", skb, skb->len);
2265 hci_send_frame(skb);
2266
2267 conn->sent++;
2268 if (conn->sent == ~0)
2269 conn->sent = 0;
2270 }
2271 }
2272}
2273
/* Drain queued LE frames.  Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) borrow credit from the ACL pool, so the working
 * count is written back to the matching counter at the end. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers or shared ACL ones. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write the remaining credit back to whichever pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2307
/* TX tasklet entry point: run the per-link-type schedulers in priority
 * order, then flush any raw (unknown type) packets.  Holds the task
 * read lock so protocol (un)registration, which takes it for write,
 * cannot race with transmission. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2334
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002335/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002336
/* ACL data packet */
/* Deliver an incoming ACL data packet to L2CAP.  The skb is consumed:
 * either handed to the protocol's recv_acldata (which takes ownership)
 * or freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field packs the connection handle together
	 * with the packet-boundary/broadcast flags; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2376
/* SCO data packet */
/* Deliver an incoming SCO data packet to the SCO protocol layer.  The
 * skb is consumed: handed to recv_scodata or freed here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2412
/* RX tasklet entry point: drain the device's receive queue, mirroring
 * to promiscuous sockets, discarding packets in RAW mode, dropping
 * data packets during init, and dispatching the rest by packet type.
 * Holds the task read lock against protocol (un)registration. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* RAW mode: userspace owns the device, kernel stack is
		 * bypassed entirely. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2467
/* CMD tasklet entry point: transmit at most one queued command when
 * the controller has command credit (cmd_cnt).  The sent command is
 * kept in hdev->sent_cmd for hci_sent_cmd_data(), and cmd_timer is
 * armed to detect a controller that never responds. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command; kfree_skb(NULL) is a no-op. */
		kfree_skb(hdev->sent_cmd);

		/* Clone so a copy survives for hci_sent_cmd_data(). */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002495
/* Module parameter (enable_smp is declared elsewhere in this file):
 * toggles Security Manager Protocol support for LE links; 0644 makes
 * it readable/writable via sysfs at runtime. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");