blob: 47bfde5f01995fba453a21b77834e99e554c7c75 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Brian Gix3cd62042012-01-11 15:18:17 -08003 Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Timeout (in ms) used by the management auto power-off machinery. */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet bodies, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the RX/TX/cmd task paths across all devices. */
static DEFINE_RWLOCK(hci_task_lock);

/* Module toggle for Security Manager Protocol support (default on). */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols (slots for the L2CAP and SCO protocol drivers) */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
/* Register a callback on the HCI notifier chain (device add/remove,
 * up/down events delivered via hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
90
/* Remove a previously registered callback from the HCI notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
95
/* Broadcast an HCI device event (HCI_DEV_UP, HCI_DEV_DOWN, ...) to all
 * registered notifier callbacks. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
100
101/* ---- HCI requests ---- */
102
/* Called from the HCI event path when a command completes; wakes up a
 * synchronous __hci_request() waiter, storing the HCI result first. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only wake the waiter if a request is actually pending; store the
	 * result before flipping the status so the waiter reads it safely. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
/* Execute request and wait for completion.
 *
 * Runs the request callback 'req' (which queues HCI commands) and sleeps
 * interruptibly until hci_req_complete()/hci_req_cancel() wakes us or the
 * timeout (in jiffies) expires. Returns 0 or a negative errno.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and mark interruptible *before*
	 * issuing the request, so a fast completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status at HCI_REQ_PEND
	 * (the reset below is skipped) — confirm callers tolerate that. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller answered: map the HCI status code to an errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result. */
		err = -hdev->req_result;
		break;

	default:
		/* Woken by timeout with neither completion nor cancel. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
173
/* Serialized wrapper around __hci_request(): requires the device to be up
 * and holds the per-device request lock for the duration. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
189
/* Request callback: queue an HCI_Reset and clear the cached feature mask
 * (it will be re-read during the next init sequence). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	memset(&hdev->features, 0, sizeof(hdev->features));
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
199
/* Request callback run under HCI_INIT from hci_dev_open(): queues the
 * full controller initialization command sequence. BR/EDR and AMP
 * controllers get different tails; command order is significant. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-supplied pre-init commands
	 * into the command queue first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot handle HCI_Reset) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);


	/* Set default HCI Flow Control Mode: packet based for BR/EDR,
	 * block based for AMP controllers. */
	if (hdev->dev_type == HCI_BREDR)
		hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
	else
		hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;

	/* Read HCI Flow Control Mode */
	hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Data Block Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	if (hdev->dev_type == HCI_BREDR) {
		/* BR-EDR initialization */

		/* Read Local Supported Features */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

		/* Read BD Address */
		hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

		/* Read Class of Device */
		hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

		/* Read Local Name */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

		/* Read Voice Setting */
		hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

		/* Optional initialization */
		/* Clear Event Filters */
		flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

		/* Connection accept timeout ~20 secs (0x7d00 slots) */
		param = cpu_to_le16(0x7d00);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Purge all stored link keys on the controller */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 1;
		hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				sizeof(cp), &cp);
	} else {
		/* AMP initialization */
		/* Connection accept timeout ~5 secs (0x1f40 slots) */
		param = cpu_to_le16(0x1f40);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Read AMP Info */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
	}
}
301
/* Request callback for LE-capable controllers: read the LE buffer
 * parameters (run right after hci_init_req during device open). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __u8 scan = opt;
313
314 BT_DBG("%s %x", hdev->name, scan);
315
316 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200317 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318}
319
320static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
321{
322 __u8 auth = opt;
323
324 BT_DBG("%s %x", hdev->name, auth);
325
326 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200327 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328}
329
330static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
331{
332 __u8 encrypt = opt;
333
334 BT_DBG("%s %x", hdev->name, encrypt);
335
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200336 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200337 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338}
339
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200340static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
341{
342 __le16 policy = cpu_to_le16(opt);
343
Marcel Holtmanna418b892008-11-30 12:17:28 +0100344 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200345
346 /* Default link policy */
347 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
348}
349
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900350/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 * Device is held on return. */
352struct hci_dev *hci_dev_get(int index)
353{
354 struct hci_dev *hdev = NULL;
355 struct list_head *p;
356
357 BT_DBG("%d", index);
358
359 if (index < 0)
360 return NULL;
361
362 read_lock(&hci_dev_list_lock);
363 list_for_each(p, &hci_dev_list) {
364 struct hci_dev *d = list_entry(p, struct hci_dev, list);
365 if (d->id == index) {
366 hdev = hci_dev_hold(d);
367 break;
368 }
369 }
370 read_unlock(&hci_dev_list_lock);
371 return hdev;
372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375/* ---- Inquiry support ---- */
376static void inquiry_cache_flush(struct hci_dev *hdev)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *next = cache->list, *e;
380
381 BT_DBG("cache %p", cache);
382
383 cache->list = NULL;
384 while ((e = next)) {
385 next = e->next;
386 kfree(e);
387 }
388}
389
390struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
391{
392 struct inquiry_cache *cache = &hdev->inq_cache;
393 struct inquiry_entry *e;
394
395 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
396
397 for (e = cache->list; e; e = e->next)
398 if (!bacmp(&e->data.bdaddr, bdaddr))
399 break;
400 return e;
401}
402
403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
404{
405 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200406 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
408 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
409
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200410 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
411 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200413 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
414 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416
417 ie->next = cache->list;
418 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 }
420
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200421 memcpy(&ie->data, data, sizeof(*data));
422 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 cache->timestamp = jiffies;
424}
425
426static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
427{
428 struct inquiry_cache *cache = &hdev->inq_cache;
429 struct inquiry_info *info = (struct inquiry_info *) buf;
430 struct inquiry_entry *e;
431 int copied = 0;
432
433 for (e = cache->list; e && copied < num; e = e->next, copied++) {
434 struct inquiry_data *data = &e->data;
435 bacpy(&info->bdaddr, &data->bdaddr);
436 info->pscan_rep_mode = data->pscan_rep_mode;
437 info->pscan_period_mode = data->pscan_period_mode;
438 info->pscan_mode = data->pscan_mode;
439 memcpy(info->dev_class, data->dev_class, 3);
440 info->clock_offset = data->clock_offset;
441 info++;
442 }
443
444 BT_DBG("cache %p, copied %d", cache, copied);
445 return copied;
446}
447
/* Request callback: start an inquiry with the parameters passed through
 * 'opt' (a struct hci_inquiry_req pointer). No-op if one is running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
464
/* HCIINQUIRY ioctl handler: run an inquiry (or reuse a fresh cache) and
 * copy up to ir.num_rsp results back to user space after the header. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a fresh inquiry when the cache is stale or empty, or
	 * when the caller explicitly requested a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* 2000 ms of wait budget per requested inquiry length unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write the updated header first, then the result array after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
530
531/* ---- HCI ioctl helpers ---- */
532
/* HCIDEVUP ioctl: open the driver, run the controller init sequence and
 * mark the device up; on init failure everything is torn down again.
 * Returns 0 or a negative errno (-ERFKILL, -EALREADY, -EIO, ...). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on a device that is soft-blocked by rfkill. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip stack-driven initialization entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): an LE init failure overwrites (masks) the
		 * result of the BR/EDR init request above. */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Success: hold a ref for the UP state and tell the stack. */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev->id, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
615
/* Bring a device down: stop the RX/TX/cmd tasklets, flush connections and
 * queues, reset the controller and close the driver. Teardown order is
 * significant (tasks before queues, reset before cmd task kill). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	unsigned long keepflags = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just make sure the command timer is stopped. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->dev_type == HCI_BREDR) {
		hci_dev_lock_bh(hdev);
		mgmt_powered(hdev->id, 0);
		hci_dev_unlock_bh(hdev);
	}

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear only non-persistent flags: MGMT / LINK_KEYS / DEBUG_KEYS
	 * survive a down/up cycle. */
	if (test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_MGMT, &keepflags);
	if (test_bit(HCI_LINK_KEYS, &hdev->flags))
		set_bit(HCI_LINK_KEYS, &keepflags);
	if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
		set_bit(HCI_DEBUG_KEYS, &keepflags);

	hdev->flags = keepflags;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device went up. */
	hci_dev_put(hdev);
	return 0;
}
695
696int hci_dev_close(__u16 dev)
697{
698 struct hci_dev *hdev;
699 int err;
700
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200701 hdev = hci_dev_get(dev);
702 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 return -ENODEV;
704 err = hci_dev_do_close(hdev);
705 hci_dev_put(hdev);
706 return err;
707}
708
/* HCIDEVRESET ioctl: flush queues, connections and the inquiry cache,
 * then issue HCI_Reset (skipped for raw-mode devices). */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiet while the queues are purged below. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single command credit and zero flow-control counts. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
749
750int hci_dev_reset_stat(__u16 dev)
751{
752 struct hci_dev *hdev;
753 int ret = 0;
754
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200755 hdev = hci_dev_get(dev);
756 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 return -ENODEV;
758
759 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
760
761 hci_dev_put(hdev);
762
763 return ret;
764}
765
/* Dispatcher for the per-device configuration ioctls (HCISETAUTH,
 * HCISETSCAN, HCISETPTYPE, ...); 'arg' is a struct hci_dev_req in user
 * space. Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	/* The remaining commands only update host-side state. */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: pkt count in the first
		 * half-word, MTU in the second. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
840
/* HCIGETDEVLIST ioctl: return the ids and flag words of up to dev_num
 * registered devices to user space. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the allocation stays within two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* NOTE(review): presumably cancels a pending auto power-off
		 * so an enumerated device stays up — confirm. */
		hci_del_off_timer(hdev);

		/* Devices not controlled through mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
890
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device id supplied by userspace and copy it back.
 *
 * Returns 0 on success, -EFAULT or -ENODEV on failure.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference on success; released via hci_dev_put() below */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto power-off */
	hci_del_off_timer(hdev);

	/* Legacy (non-mgmt) userspace expects devices to be pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus, high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
931
932/* ---- Interface to HCI drivers ---- */
933
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200934static int hci_rfkill_set_block(void *data, bool blocked)
935{
936 struct hci_dev *hdev = data;
937
938 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
939
940 if (!blocked)
941 return 0;
942
943 hci_dev_do_close(hdev);
944
945 return 0;
946}
947
948static const struct rfkill_ops hci_rfkill_ops = {
949 .set_block = hci_rfkill_set_block,
950};
951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952/* Alloc HCI device */
953struct hci_dev *hci_alloc_dev(void)
954{
955 struct hci_dev *hdev;
956
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200957 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700958 if (!hdev)
959 return NULL;
960
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961 skb_queue_head_init(&hdev->driver_init);
962
963 return hdev;
964}
965EXPORT_SYMBOL(hci_alloc_dev);
966
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
976
/* Deferred power-on handler, runs from hdev->workqueue.  Opens the
 * device and, for BR/EDR controllers, arms the auto power-off timer
 * and announces the new index to the management interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	/* -EALREADY just means the device was already up; continue */
	if (err && err != -EALREADY)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First-time setup finished: tell mgmt the index exists */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
997
/* Deferred power-off handler, runs from hdev->workqueue (queued by
 * hci_auto_off() since hci_dev_close() may sleep).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
1006
/* off_timer callback (timer/softirq context): the device saw no use,
 * so clear the auto-off flag and hand the actual close to the
 * workqueue, where sleeping is allowed.
 */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
1017
/* Cancel a pending auto power-off: called whenever userspace shows
 * interest in the device so it is not shut down underneath it.
 */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1025
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001026int hci_uuids_clear(struct hci_dev *hdev)
1027{
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->uuids) {
1031 struct bt_uuid *uuid;
1032
1033 uuid = list_entry(p, struct bt_uuid, list);
1034
1035 list_del(p);
1036 kfree(uuid);
1037 }
1038
1039 return 0;
1040}
1041
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001042int hci_link_keys_clear(struct hci_dev *hdev)
1043{
1044 struct list_head *p, *n;
1045
1046 list_for_each_safe(p, n, &hdev->link_keys) {
1047 struct link_key *key;
1048
1049 key = list_entry(p, struct link_key, list);
1050
1051 list_del(p);
1052 kfree(key);
1053 }
1054
1055 return 0;
1056}
1057
1058struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1059{
1060 struct list_head *p;
1061
1062 list_for_each(p, &hdev->link_keys) {
1063 struct link_key *k;
1064
1065 k = list_entry(p, struct link_key, list);
1066
1067 if (bacmp(bdaddr, &k->bdaddr) == 0)
1068 return k;
1069 }
1070
1071 return NULL;
1072}
1073
/* Look up the LE Long Term Key matching the EDIV and 8-byte Rand from
 * an LE key request.  LTKs live on the shared link_keys list and carry
 * a struct key_master_id in their variable-length data area.
 * Returns the key or NULL.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;
		struct key_master_id *id;

		k = list_entry(p, struct link_key, list);

		if (k->key_type != KEY_TYPE_LTK)
			continue;

		/* Payload must be exactly a key_master_id to be usable */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1099
1100struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1101 bdaddr_t *bdaddr, u8 type)
1102{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001103 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001105 list_for_each(p, &hdev->link_keys) {
1106 struct link_key *k;
1107
1108 k = list_entry(p, struct link_key, list);
1109
Brian Gixcf956772011-10-20 15:18:51 -07001110 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001111 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001112 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001113
1114 return NULL;
1115}
1116EXPORT_SYMBOL(hci_find_link_key_type);
1117
/* Store (or update) a BR/EDR link key for @bdaddr and, when @new_key
 * is set, notify the management interface, flagging whether the key
 * should be treated as a persistent bond.
 *
 * @new_key: non-zero if this key arrived from the controller (as
 *           opposed to being loaded from storage).
 * @val:     16-byte key value.
 * @type:    HCI link key type (0x06 = changed combination key).
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->key_type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->key_type = type;
	key->pin_len = pin_len;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	/* Store the link key persistently if one of the following is true:
	 * 1. the remote side is using dedicated bonding since in that case
	 *    also the local requirements are set to dedicated bonding
	 * 2. the local side had dedicated bonding as a requirement
	 * 3. this is a legacy link key
	 * 4. this is a changed combination key and there was a previously
	 *    stored one
	 * If none of the above match only keep the link key around for
	 * this connection and set the temporary flag for the device.
	 */

	if (conn) {
		if ((conn->remote_auth > 0x01) ||
			(conn->auth_initiator && conn->auth_type > 0x01) ||
			(key->key_type < 0x03) ||
			(key->key_type == 0x06 && old_key_type != 0xff))
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* A changed combination key keeps the original key's type */
	if (type == 0x06)
		key->key_type = old_key_type;

	return 0;
}
1174
/* Store (or update) an LE Long Term Key for @bdaddr.  The EDIV/Rand
 * pair used to look the key up later is appended in the entry's
 * variable-length data area as a struct key_master_id.
 *
 * @new_key:  non-zero to notify the management interface.
 * @auth:     SMP authentication requirements; bit 0 (bonding) is
 *            passed to mgmt as the "store persistently" hint.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 addr_type, u8 key_size, u8 auth,
				__le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
						batostr(bdaddr), addr_type);

	/* Reuse an existing LTK entry for this address if there is one */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		/* Extra room for the trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	key->addr_type = addr_type;
	memcpy(key->val, ltk, sizeof(key->val));
	key->key_type = KEY_TYPE_LTK;
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1213
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001214int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1215{
1216 struct link_key *key;
1217
1218 key = hci_find_link_key(hdev, bdaddr);
1219 if (!key)
1220 return -ENOENT;
1221
1222 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1223
1224 list_del(&key->list);
1225 kfree(key);
1226
1227 return 0;
1228}
1229
/* HCI command timer function */
/* Fires when the controller failed to answer a command in time:
 * free the command credit and kick the command tasklet so the queue
 * does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	/* A timed-out HCI_Reset would otherwise leave this bit stuck */
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1240
Szymon Janc2763eda2011-03-22 13:12:22 +01001241struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1242 bdaddr_t *bdaddr)
1243{
1244 struct oob_data *data;
1245
1246 list_for_each_entry(data, &hdev->remote_oob_data, list)
1247 if (bacmp(bdaddr, &data->bdaddr) == 0)
1248 return data;
1249
1250 return NULL;
1251}
1252
1253int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1254{
1255 struct oob_data *data;
1256
1257 data = hci_find_remote_oob_data(hdev, bdaddr);
1258 if (!data)
1259 return -ENOENT;
1260
1261 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1262
1263 list_del(&data->list);
1264 kfree(data);
1265
1266 return 0;
1267}
1268
1269int hci_remote_oob_data_clear(struct hci_dev *hdev)
1270{
1271 struct oob_data *data, *n;
1272
1273 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1274 list_del(&data->list);
1275 kfree(data);
1276 }
1277
1278 return 0;
1279}
1280
/* adv_timer callback: expire the cached LE advertising entries. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;

	hci_adv_entries_clear(hdev);
}
1287
1288int hci_adv_entries_clear(struct hci_dev *hdev)
1289{
1290 struct list_head *p, *n;
1291
Brian Gixa68668b2011-08-11 15:49:36 -07001292 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001293 write_lock_bh(&hdev->adv_entries_lock);
1294
1295 list_for_each_safe(p, n, &hdev->adv_entries) {
1296 struct adv_entry *entry;
1297
1298 entry = list_entry(p, struct adv_entry, list);
1299
1300 list_del(p);
1301 kfree(entry);
1302 }
1303
1304 write_unlock_bh(&hdev->adv_entries_lock);
1305
1306 return 0;
1307}
1308
1309struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1310{
1311 struct list_head *p;
1312 struct adv_entry *res = NULL;
1313
Brian Gixa68668b2011-08-11 15:49:36 -07001314 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315 read_lock_bh(&hdev->adv_entries_lock);
1316
1317 list_for_each(p, &hdev->adv_entries) {
1318 struct adv_entry *entry;
1319
1320 entry = list_entry(p, struct adv_entry, list);
1321
1322 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1323 res = entry;
1324 goto out;
1325 }
1326 }
1327out:
1328 read_unlock_bh(&hdev->adv_entries_lock);
1329 return res;
1330}
1331
1332static inline int is_connectable_adv(u8 evt_type)
1333{
1334 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1335 return 1;
1336
1337 return 0;
1338}
1339
/* Cache (or refresh) remote out-of-band pairing data for @bdaddr.
 *
 * @hash:       remote OOB hash C (16 bytes, sized by data->hash).
 * @randomizer: remote OOB randomizer R (16 bytes).
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1363
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001364int hci_add_adv_entry(struct hci_dev *hdev,
1365 struct hci_ev_le_advertising_info *ev)
1366{
1367 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001368 u8 flags = 0;
1369 int i;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001370
Brian Gixa68668b2011-08-11 15:49:36 -07001371 BT_DBG("");
1372
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001373 if (!is_connectable_adv(ev->evt_type))
1374 return -EINVAL;
1375
Brian Gixfdd38922011-09-28 16:23:48 -07001376 if (ev->data && ev->length) {
1377 for (i = 0; (i + 2) < ev->length; i++)
1378 if (ev->data[i+1] == 0x01) {
1379 flags = ev->data[i+2];
1380 BT_DBG("flags: %2.2x", flags);
1381 break;
1382 } else {
1383 i += ev->data[i];
1384 }
1385 }
1386
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001387 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001388 /* Only new entries should be added to adv_entries. So, if
1389 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001390 if (entry) {
1391 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001392 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001393 }
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001394
1395 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1396 if (!entry)
1397 return -ENOMEM;
1398
1399 bacpy(&entry->bdaddr, &ev->bdaddr);
1400 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001401 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001404 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001405 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001406
1407 return 0;
1408}
1409
/* Allocate the AES-ECB block cipher used by SMP, or return an
 * ERR_PTR when SMP is disabled via the enable_smp module parameter
 * (callers check with IS_ERR()).
 */
static struct crypto_blkcipher *alloc_cypher(void)
{
	if (enable_smp)
		return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);

	return ERR_PTR(-ENOTSUPP);
}
1417
/* Register HCI device */
/* Assigns the lowest free id, initializes all per-device state
 * (queues, tasklets, timers, key/OOB/adv lists), creates the
 * workqueue and sysfs/rfkill hooks, then schedules the initial
 * power-on.  Returns the assigned id or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* BR/EDR controllers start the id search at 0, others at 1 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after @head so the list stays sorted by id */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
	setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
						(unsigned long) hdev);
	setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
						(unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP cipher is optional: failure is logged, not fatal */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on now; auto-off later unless userspace claims the device */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1541
/* Unregister HCI device */
/* Tears down in careful order: unlink from the global list, close
 * the device, notify mgmt, release rfkill/sysfs, stop all timers,
 * destroy the workqueue, and finally purge stored state and drop
 * the registration reference.  Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up BR/EDR controllers */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev->id);
		hci_dev_unlock_bh(hdev);
	}

	/* tfm may hold an ERR_PTR when SMP was disabled */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Disable all timers */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);
	del_timer(&hdev->cmd_timer);
	del_timer(&hdev->disco_timer);
	del_timer(&hdev->disco_le_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1600
/* Suspend HCI device */
/* Notifies registered listeners; drivers call this from their own
 * suspend path.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1608
/* Resume HCI device */
/* Counterpart of hci_suspend_dev().  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1616
/* Receive frame from HCI drivers */
/* Entry point for complete frames from a transport driver; skb->dev
 * must point at the owning hci_dev.  The frame is timestamped and
 * queued for the rx tasklet.  Consumes @skb in every case.
 * Returns 0 or -ENXIO if the device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1640
/* Incrementally reassemble an HCI packet of @type from @count bytes
 * at @data, using hdev->reassembly[@index] as per-slot state.  The
 * skb's control block tracks how many bytes are still expected; once
 * the header arrives the full payload length is read from it.
 * Complete frames are handed to hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes (>= 0), or a negative
 * errno (-EILSEQ for bad type/index, -ENOMEM on allocation failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First bytes of a new packet: allocate a buffer big
		 * enough for the largest frame of this type and expect
		 * the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1749
Marcel Holtmannef222012007-07-11 06:42:04 +02001750int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1751{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301752 int rem = 0;
1753
Marcel Holtmannef222012007-07-11 06:42:04 +02001754 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1755 return -EILSEQ;
1756
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001757 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001758 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301759 if (rem < 0)
1760 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001761
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301762 data += (count - rem);
1763 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001764 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001765
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301766 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001767}
1768EXPORT_SYMBOL(hci_recv_fragment);
1769
/* All stream input shares one reassembly slot */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes (e.g. from a UART transport) into the
 * reassembler.  When no packet is in progress, the first byte is the
 * packet-type indicator; otherwise the in-progress skb remembers it.
 * Returns bytes left unconsumed (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1804
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805/* ---- Interface to upper protocols ---- */
1806
1807/* Register/Unregister protocols.
1808 * hci_task_lock is used to ensure that no tasks are running. */
1809int hci_register_proto(struct hci_proto *hp)
1810{
1811 int err = 0;
1812
1813 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1814
1815 if (hp->id >= HCI_MAX_PROTO)
1816 return -EINVAL;
1817
1818 write_lock_bh(&hci_task_lock);
1819
1820 if (!hci_proto[hp->id])
1821 hci_proto[hp->id] = hp;
1822 else
1823 err = -EEXIST;
1824
1825 write_unlock_bh(&hci_task_lock);
1826
1827 return err;
1828}
1829EXPORT_SYMBOL(hci_register_proto);
1830
1831int hci_unregister_proto(struct hci_proto *hp)
1832{
1833 int err = 0;
1834
1835 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1836
1837 if (hp->id >= HCI_MAX_PROTO)
1838 return -EINVAL;
1839
1840 write_lock_bh(&hci_task_lock);
1841
1842 if (hci_proto[hp->id])
1843 hci_proto[hp->id] = NULL;
1844 else
1845 err = -ENOENT;
1846
1847 write_unlock_bh(&hci_task_lock);
1848
1849 return err;
1850}
1851EXPORT_SYMBOL(hci_unregister_proto);
1852
/* Add a callback block to the global HCI callback list (used for
 * connection security/role-change notifications).  Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1864
/* Remove a callback block from the global HCI callback list.  The
 * caller must guarantee cb was previously registered.  Always 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1876
/* Register an AMP manager callback block; its hooks are invoked from
 * the hci_amp_* dispatchers below.  Always succeeds. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1888
/* Remove an AMP manager callback block.  Caller must ensure cb was
 * registered.  Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1900
/* Fan an HCI Command Complete event out to every registered AMP
 * manager that installed an amp_cmd_complete_event hook.  The skb is
 * shared across callees; none of them may consume it here. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	/* Read lock only: callbacks run concurrently with (un)register. */
	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1915
/* Fan an HCI Command Status event (opcode + status byte) out to every
 * registered AMP manager with an amp_cmd_status_event hook. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1929
/* Fan a raw HCI event (by event code) out to every registered AMP
 * manager with an amp_event hook.  skb ownership stays with caller. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1944
/* Hand one fully formed HCI packet to the transport driver.  Consumes
 * the skb in all cases (either freed on error or passed to the driver).
 * skb->dev was previously stashed with the owning hci_dev pointer. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* A raw/monitor socket is listening: timestamp and mirror the
	 * outgoing frame to it before transmission. */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1969
/* Send HCI command */
/* Build an HCI command packet (header + plen bytes of param) and queue
 * it on cmd_q; the command tasklet performs the actual transmission so
 * outstanding-command flow control (cmd_cnt) is honoured.
 * Returns 0 or -ENOMEM.  param may be NULL when plen == 0. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	/* GFP_ATOMIC: may be called from tasklet/softirq context. */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last command issued so
	 * the init state machine can sequence its setup. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
2007/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002008void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002009{
2010 struct hci_command_hdr *hdr;
2011
2012 if (!hdev->sent_cmd)
2013 return NULL;
2014
2015 hdr = (void *) hdev->sent_cmd->data;
2016
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002017 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 return NULL;
2019
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002020 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021
2022 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2023}
2024
/* Send ACL data */
/* Prepend an ACL data header to skb: 12-bit connection handle packed
 * with the 4-bit PB/BC flags, followed by the payload length.  The
 * header bytes must already be reserved in the skb headroom. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2037
/* Queue an ACL packet (possibly a frag_list chain) on the connection's
 * data queue and kick the TX tasklet.  For AMP (non-BR/EDR) devices the
 * logical-link handle from chan is used instead of the ACL handle. */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Continuation fragments carry the "continuing fragment"
		 * packet-boundary flag instead of the start flag. */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, while the first fragment above may
			 * have used chan->ll_handle for AMP devices —
			 * confirm this asymmetry is intentional. */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2089
/* Send SCO data */
/* Prepend a SCO header (handle + 8-bit length) and queue the packet on
 * the connection's data queue; the TX tasklet transmits it. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): dlen is a __u8 in the SCO header; skb->len above
	 * 255 would silently truncate — callers are presumably bounded
	 * by the SCO MTU, verify. */
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2112
2113/* ---- HCI TX task (outgoing data) ---- */
2114
/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets (fairness), and compute its transmit quota
 * from the controller's free buffer count divided by the number of
 * eligible connections (minimum 1).  Returns NULL / *quote = 0 when
 * nothing is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;	/* ~0: larger than any real sent count */
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only fully established (or configuring) links may
		 * carry data. */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Select the buffer pool matching the link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2170
/* TX watchdog: the controller stopped returning buffer credits for
 * this link type.  Disconnect every connection of that type that still
 * has unacknowledged packets so its credits can be reclaimed. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13 = Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
2189
/* Drain queued ACL data, honouring the controller's free-buffer count.
 * Supports both packet-based and block-based flow control: in block
 * mode one skb may consume several buffer blocks. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (hdev->acl_cnt <= 0 &&
			time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt > 0 &&
		(conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
			int count = 1;

			BT_DBG("skb %p len %d", skb, skb->len);

			if (hdev->flow_ctl_mode ==
				HCI_BLOCK_BASED_FLOW_CTL_MODE)
				/* Calculate count of blocks used by
				 * this packet
				 */
				count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
					hdev->data_block_len) + 1;

			/* NOTE(review): this skb was already dequeued;
			 * returning here neither frees nor requeues it —
			 * looks like a leak/drop when the packet does not
			 * fit in the remaining blocks.  Confirm. */
			if (count > hdev->acl_cnt)
				return;

			hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt -= count;
			quote -= count;

			conn->sent += count;
		}
	}
}
2236
/* Schedule SCO */
/* Drain queued SCO data while the controller reports free SCO buffers.
 * conn->sent wraps back to 0 at ~0 since SCO has no per-packet
 * completion accounting. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2257
/* Drain queued eSCO data; identical to hci_sched_sco() but selects
 * ESCO_LINK connections.  eSCO shares the SCO buffer pool (sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2277
/* Drain queued LE data.  Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow from the ACL pool, so the consumed credit
 * count is written back to whichever pool was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Return unused credits to the pool they came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2311
/* TX tasklet: run every per-link-type scheduler, then flush raw
 * (unknown type) packets straight to the driver.  hci_task_lock is
 * held for reading so protocol (un)registration is excluded. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2338
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002339/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002340
/* ACL data packet */
/* Deliver an incoming ACL packet to L2CAP.  The 16-bit header field
 * packs a 12-bit handle with 4 PB/BC flag bits, split out below.
 * Frees the skb unless the upper protocol takes ownership. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Inbound traffic counts as activity: leave sniff mode
		 * if the packet demands it. */
		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;	/* L2CAP now owns the skb */
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2380
/* SCO data packet */
/* Deliver an incoming SCO packet to the SCO protocol layer.  Frees the
 * skb unless the upper protocol takes ownership. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;	/* SCO layer now owns the skb */
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2416
/* RX tasklet: drain rx_q, mirroring frames to promiscuous sockets,
 * then dispatch by packet type.  Data packets are discarded while the
 * device is in raw mode or still initialising. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: user space handles everything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2471
/* Command tasklet: transmit the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).  A clone of the
 * outgoing command is kept in sent_cmd so hci_sent_cmd_data() can
 * correlate the eventual Command Complete/Status event. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* Arm the watchdog that fires if the controller
			 * never answers this command. */
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002499
/* Module parameter: toggle SMP (LE Security Manager) support; 0644
 * makes it writable by root via sysfs at runtime. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");