blob: 699284ab327c8bbf89e4c71eeacf2d8448e4d62b [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Mat Martineauf058a442011-08-26 09:33:32 -070055#define AUTO_OFF_TIMEOUT 2000
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Brian Gixa68668b2011-08-11 15:49:36 -070063static int enable_smp = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070064
Linus Torvalds1da177e2005-04-16 15:20:36 -070065/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073/* AMP Manager event callbacks */
74LIST_HEAD(amp_mgr_cb_list);
75DEFINE_RWLOCK(amp_mgr_cb_list_lock);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077/* HCI protocols */
78#define HCI_MAX_PROTO 2
79struct hci_proto *hci_proto[HCI_MAX_PROTO];
80
81/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080082static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
/* Run all registered notifier callbacks for @event on @hdev. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Issues @req and sleeps (interruptibly, up to @timeout jiffies) until
 * hci_req_complete()/hci_req_cancel() wakes us, a signal arrives, or
 * the timeout expires. Caller must hold the request lock.
 *
 * Returns 0 on success, a negative errno on failure, -EINTR on signal
 * or -ETIMEDOUT if the controller never answered.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue *before* issuing the request
	 * so a completion that fires immediately cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status; bt_err() maps it to an
		 * errno value which is then negated. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by the canceller. */
		err = -hdev->req_result;
		break;

	default:
		/* Still pending after the timeout: no answer at all. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
/* HCI request callback: flag a reset in progress and send HCI_Reset. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
198
/* HCI request callback: queue the full controller init command
 * sequence. Runs under HCI_INIT; each completion is matched against
 * init_last_cmd in hci_req_complete(). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: the driver may have queued vendor-specific
	 * setup commands on driver_init; move them to the command queue
	 * first so they run before the standard sequence. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset -- skipped for controllers whose quirk says they cannot
	 * handle HCI_Reset during init. */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);


	/* Set default HCI Flow Control Mode: packet based for BR/EDR
	 * controllers, block based otherwise (AMP). */
	if (hdev->dev_type == HCI_BREDR)
		hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
	else
		hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;

	/* Read HCI Flow Control Mode */
	hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Data Block Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	if (hdev->dev_type == HCI_BREDR) {
		/* BR-EDR initialization */

		/* Read Local Supported Features */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

		/* Read BD Address */
		hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

		/* Read Class of Device */
		hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

		/* Read Local Name */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

		/* Read Voice Setting */
		hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

		/* Optional initialization */
		/* Clear Event Filters */
		flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

		/* Connection accept timeout ~20 secs */
		param = cpu_to_le16(0x7d00);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Delete all stored link keys for every remote device. */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 1;
		hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				sizeof(cp), &cp);
	} else {
		/* AMP initialization */
		/* Connection accept timeout ~5 secs */
		param = cpu_to_le16(0x1f40);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Read AMP Info */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
	}
}
300
/* HCI request callback: LE-specific init, run after hci_init_req()
 * for LE capable controllers (see hci_dev_open()). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 scan = opt;
312
313 BT_DBG("%s %x", hdev->name, scan);
314
315 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 auth = opt;
322
323 BT_DBG("%s %x", hdev->name, auth);
324
325 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
329static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __u8 encrypt = opt;
332
333 BT_DBG("%s %x", hdev->name, encrypt);
334
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200335 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200336 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200339static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
340{
341 __le16 policy = cpu_to_le16(opt);
342
Marcel Holtmanna418b892008-11-30 12:17:28 +0100343 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200344
345 /* Default link policy */
346 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
347}
348
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900349/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 * Device is held on return. */
351struct hci_dev *hci_dev_get(int index)
352{
353 struct hci_dev *hdev = NULL;
354 struct list_head *p;
355
356 BT_DBG("%d", index);
357
358 if (index < 0)
359 return NULL;
360
361 read_lock(&hci_dev_list_lock);
362 list_for_each(p, &hci_dev_list) {
363 struct hci_dev *d = list_entry(p, struct hci_dev, list);
364 if (d->id == index) {
365 hdev = hci_dev_hold(d);
366 break;
367 }
368 }
369 read_unlock(&hci_dev_list_lock);
370 return hdev;
371}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374/* ---- Inquiry support ---- */
375static void inquiry_cache_flush(struct hci_dev *hdev)
376{
377 struct inquiry_cache *cache = &hdev->inq_cache;
378 struct inquiry_entry *next = cache->list, *e;
379
380 BT_DBG("cache %p", cache);
381
382 cache->list = NULL;
383 while ((e = next)) {
384 next = e->next;
385 kfree(e);
386 }
387}
388
389struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
392 struct inquiry_entry *e;
393
394 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
395
396 for (e = cache->list; e; e = e->next)
397 if (!bacmp(&e->data.bdaddr, bdaddr))
398 break;
399 return e;
400}
401
402void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
403{
404 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200405 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406
407 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
408
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200409 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
410 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200412 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
413 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200415
416 ie->next = cache->list;
417 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 }
419
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200420 memcpy(&ie->data, data, sizeof(*data));
421 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 cache->timestamp = jiffies;
423}
424
425static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
426{
427 struct inquiry_cache *cache = &hdev->inq_cache;
428 struct inquiry_info *info = (struct inquiry_info *) buf;
429 struct inquiry_entry *e;
430 int copied = 0;
431
432 for (e = cache->list; e && copied < num; e = e->next, copied++) {
433 struct inquiry_data *data = &e->data;
434 bacpy(&info->bdaddr, &data->bdaddr);
435 info->pscan_rep_mode = data->pscan_rep_mode;
436 info->pscan_period_mode = data->pscan_period_mode;
437 info->pscan_mode = data->pscan_mode;
438 memcpy(info->dev_class, data->dev_class, 3);
439 info->clock_offset = data->clock_offset;
440 info++;
441 }
442
443 BT_DBG("cache %p, copied %d", cache, copied);
444 return copied;
445}
446
447static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
448{
449 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
450 struct hci_cp_inquiry cp;
451
452 BT_DBG("%s", hdev->name);
453
454 if (test_bit(HCI_INQUIRY, &hdev->flags))
455 return;
456
457 /* Start Inquiry */
458 memcpy(&cp.lap, &ir->lap, 3);
459 cp.length = ir->length;
460 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200461 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
463
/* ioctl(HCIINQUIRY) backend: run an inquiry (or reuse the still-fresh
 * cache) and copy the collected responses back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -ENOMEM, or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only issue a fresh inquiry when the cache is stale, empty, or
	 * the caller explicitly requested a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* Wait budget: 2000 ms per requested inquiry length unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy back the updated request header, then the responses. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
529
530/* ---- HCI ioctl helpers ---- */
531
/* ioctl(HCIDEVUP) backend: power on and initialize an HCI device.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an error from the init requests).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to bring the device up while rfkill blocks the radio. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Raw-quirk devices skip the init sequence entirely. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Ask the driver to open the transport. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Run the standard controller init sequence... */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* ...and the LE-specific one for LE capable controllers.
		 * Note: this overwrites ret from the first request. */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify the management interface, except during initial
		 * setup and for non-BR/EDR controllers. */
		if (!test_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
611
/* Common teardown for closing an HCI device: stop the tasklets, flush
 * queues and caches, reset the controller and call the driver close.
 * Used by hci_dev_close() and the rfkill block handler. Always
 * returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Abort any synchronous request still waiting for completion. */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		/* Short timeout: the reset is best-effort on close. */
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only BR/EDR controllers are exposed through mgmt. */
	if (hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
679
680int hci_dev_close(__u16 dev)
681{
682 struct hci_dev *hdev;
683 int err;
684
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200685 hdev = hci_dev_get(dev);
686 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 return -ENODEV;
688 err = hci_dev_do_close(hdev);
689 hci_dev_put(hdev);
690 return err;
691}
692
/* ioctl(HCIDEVRESET) backend: flush queues, caches and connections of
 * a running device and issue an HCI_Reset.
 *
 * Returns 0 on success (or if the device was not up) or a negative
 * errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Stop the TX tasklet while the queues are being purged. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear the flow control
	 * counters for all link types. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
734int hci_dev_reset_stat(__u16 dev)
735{
736 struct hci_dev *hdev;
737 int ret = 0;
738
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200739 hdev = hci_dev_get(dev);
740 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 return -ENODEV;
742
743 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
744
745 hci_dev_put(hdev);
746
747 return ret;
748}
749
/* Dispatcher for the simple HCI device ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...) that all take a struct hci_dev_req argument.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -EOPNOTSUPP, -EINVAL, or an error from hci_request()). */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two 16-bit halves: second halfword is
		 * the MTU, first is the packet count (memory-order
		 * layout; NOTE(review): endianness-dependent ABI). */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
824
/* ioctl(HCIGETDEVLIST) backend: copy the ids and flag words of all
 * registered devices to user space, bounded by the caller's dev_num.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -EINVAL,
 * -ENOMEM). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays within two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Raw ioctl access cancels any pending auto power-off. */
		hci_del_off_timer(hdev);

		/* Devices not managed through mgmt are marked pairable. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the reply to the number of devices actually found. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
874
/* ioctl(HCIGETDEVINFO) backend: fill a struct hci_dev_info snapshot
 * for one device and copy it to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV). */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Raw ioctl access cancels any pending auto power-off. */
	hci_del_off_timer(hdev);

	/* Devices not managed through mgmt are marked pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller (dev) type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
915
916/* ---- Interface to HCI drivers ---- */
917
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200918static int hci_rfkill_set_block(void *data, bool blocked)
919{
920 struct hci_dev *hdev = data;
921
922 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
923
924 if (!blocked)
925 return 0;
926
927 hci_dev_do_close(hdev);
928
929 return 0;
930}
931
932static const struct rfkill_ops hci_rfkill_ops = {
933 .set_block = hci_rfkill_set_block,
934};
935
/* Alloc HCI device */
/* Allocate a zeroed hci_dev for a driver about to register a controller.
 * Only the driver_init queue is set up here; everything else is filled
 * in by the driver and by hci_register_dev(). Returns NULL on OOM. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Queue of driver-supplied init commands sent on device open. */
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
950
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). The struct itself is
 * freed by the driver-model release callback once the last device
 * reference is dropped, not directly here. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
960
/* Deferred power-on work, queued from hci_register_dev(). Opens the
 * device; for BR/EDR controllers it also arms the auto-power-off timer
 * and, on first-time setup, announces the new index to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* -EALREADY (device already up) is not a failure for our purposes. */
	err = hci_dev_open(hdev->id);
	if (err && err != -EALREADY)
		return;

	/* If nobody claims the device, power it back off after the
	 * auto-off timeout (BR/EDR only). */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
			jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* End of initial setup: tell mgmt the controller exists. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
981
/* Deferred power-off work (queued e.g. by the auto-off timer): simply
 * closes the device through the normal ioctl-level path. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
990
/* off_timer callback (softirq context): the grace period after power-on
 * expired without anyone claiming the device, so schedule power-off.
 * The real close happens in process context via the workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
1001
/* Cancel a pending auto-power-off: called whenever userspace shows
 * interest in the device so it is not shut down underneath the user. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1009
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001010int hci_uuids_clear(struct hci_dev *hdev)
1011{
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->uuids) {
1015 struct bt_uuid *uuid;
1016
1017 uuid = list_entry(p, struct bt_uuid, list);
1018
1019 list_del(p);
1020 kfree(uuid);
1021 }
1022
1023 return 0;
1024}
1025
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001026int hci_link_keys_clear(struct hci_dev *hdev)
1027{
1028 struct list_head *p, *n;
1029
1030 list_for_each_safe(p, n, &hdev->link_keys) {
1031 struct link_key *key;
1032
1033 key = list_entry(p, struct link_key, list);
1034
1035 list_del(p);
1036 kfree(key);
1037 }
1038
1039 return 0;
1040}
1041
1042struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1043{
1044 struct list_head *p;
1045
1046 list_for_each(p, &hdev->link_keys) {
1047 struct link_key *k;
1048
1049 k = list_entry(p, struct link_key, list);
1050
1051 if (bacmp(bdaddr, &k->bdaddr) == 0)
1052 return k;
1053 }
1054
1055 return NULL;
1056}
1057
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001058struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1059{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001062 list_for_each(p, &hdev->link_keys) {
1063 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001064 struct key_master_id *id;
1065
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001066 k = list_entry(p, struct link_key, list);
1067
1068 if (k->type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001069 continue;
1070
1071 if (k->dlen != sizeof(*id))
1072 continue;
1073
1074 id = (void *) &k->data;
1075 if (id->ediv == ediv &&
1076 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1077 return k;
1078 }
1079
1080 return NULL;
1081}
1082EXPORT_SYMBOL(hci_find_ltk);
1083
1084struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1085 bdaddr_t *bdaddr, u8 type)
1086{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001087 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001088
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001089 list_for_each(p, &hdev->link_keys) {
1090 struct link_key *k;
1091
1092 k = list_entry(p, struct link_key, list);
1093
1094 if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001095 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001096 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001097
1098 return NULL;
1099}
1100EXPORT_SYMBOL(hci_find_link_key_type);
1101
/* Store a BR/EDR link key for @bdaddr, reusing an existing entry when
 * the address is already known.
 *
 * @new_key: non-zero when the key comes from a live HCI Link Key
 *           Notification (emits a mgmt_new_key event) rather than from
 *           stored settings.
 * @type:    HCI link key type (0x00-0x06 per the Core spec; 0x06 is
 *           "changed combination key").
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;	/* sentinel: no previous key */
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->type = type;
	key->pin_len = pin_len;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	/* Store the link key persistently if one of the following is true:
	 * 1. the remote side is using dedicated bonding since in that case
	 *    also the local requirements are set to dedicated bonding
	 * 2. the local side had dedicated bonding as a requirement
	 * 3. this is a legacy link key
	 * 4. this is a changed combination key and there was a previously
	 *    stored one
	 * If none of the above match only keep the link key around for
	 * this connection and set the temporary flag for the device.
	 */

	if (conn) {
		/* auth values > 0x01 mean dedicated bonding; key types
		 * < 0x03 are legacy (combination/local/remote unit) keys. */
		if ((conn->remote_auth > 0x01) ||
			(conn->auth_initiator && conn->auth_type > 0x01) ||
			(key->type < 0x03) ||
			(key->type == 0x06 && old_key_type != 0xff))
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* A changed combination key (0x06) keeps the previous type so the
	 * key's original nature survives the key change. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1158
/* Store (or refresh) an LE Long Term Key for @bdaddr.
 *
 * @new_key:  non-zero when freshly distributed by SMP (emits a
 *            mgmt_new_key event) rather than loaded from storage.
 * @key_size: negotiated encryption key size, stashed in pin_len.
 * @auth:     SMP authentication flags; bit 0 marks an authenticated
 *            (MITM-protected) key.
 * @ediv/@rand: master identification used to look the key up later.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, u8 auth, __le16 ediv, u8 rand[8],
			u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s", hdev->name, auth, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		/* Extra tail room for the key_master_id in key->data. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = KEY_TYPE_LTK;
	key->pin_len = key_size;	/* pin_len doubles as key size for LTKs */
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1195
/* Delete the stored link key for @bdaddr.
 * Returns 0 on success or -ENOENT if no key is stored for the address. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1211
/* HCI command timer function */
/* Fires when the controller failed to answer a command in time. Restore
 * the command credit and clear HCI_RESET so the command tasklet can make
 * progress again instead of stalling forever. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1222
Szymon Janc2763eda2011-03-22 13:12:22 +01001223struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1224 bdaddr_t *bdaddr)
1225{
1226 struct oob_data *data;
1227
1228 list_for_each_entry(data, &hdev->remote_oob_data, list)
1229 if (bacmp(bdaddr, &data->bdaddr) == 0)
1230 return data;
1231
1232 return NULL;
1233}
1234
/* Delete cached out-of-band pairing data for @bdaddr.
 * Returns 0 on success or -ENOENT if nothing is cached. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1250
/* Drop all cached out-of-band pairing data for @hdev. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1262
/* adv_timer callback (softirq context): expire the LE advertising
 * cache when it has not been refreshed by a new scan. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;

	hci_adv_entries_clear(hdev);
}
1269
1270int hci_adv_entries_clear(struct hci_dev *hdev)
1271{
1272 struct list_head *p, *n;
1273
Brian Gixa68668b2011-08-11 15:49:36 -07001274 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001275 write_lock_bh(&hdev->adv_entries_lock);
1276
1277 list_for_each_safe(p, n, &hdev->adv_entries) {
1278 struct adv_entry *entry;
1279
1280 entry = list_entry(p, struct adv_entry, list);
1281
1282 list_del(p);
1283 kfree(entry);
1284 }
1285
1286 write_unlock_bh(&hdev->adv_entries_lock);
1287
1288 return 0;
1289}
1290
1291struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1292{
1293 struct list_head *p;
1294 struct adv_entry *res = NULL;
1295
Brian Gixa68668b2011-08-11 15:49:36 -07001296 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001297 read_lock_bh(&hdev->adv_entries_lock);
1298
1299 list_for_each(p, &hdev->adv_entries) {
1300 struct adv_entry *entry;
1301
1302 entry = list_entry(p, struct adv_entry, list);
1303
1304 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1305 res = entry;
1306 goto out;
1307 }
1308 }
1309out:
1310 read_unlock_bh(&hdev->adv_entries_lock);
1311 return res;
1312}
1313
1314static inline int is_connectable_adv(u8 evt_type)
1315{
1316 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1317 return 1;
1318
1319 return 0;
1320}
1321
/* Cache out-of-band pairing data (Secure Simple Pairing hash and
 * randomizer) for @bdaddr, overwriting any previous entry for the same
 * address. Returns 0 on success or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1345
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001346int hci_add_adv_entry(struct hci_dev *hdev,
1347 struct hci_ev_le_advertising_info *ev)
1348{
1349 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001350 u8 flags = 0;
1351 int i;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001352
Brian Gixa68668b2011-08-11 15:49:36 -07001353 BT_DBG("");
1354
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001355 if (!is_connectable_adv(ev->evt_type))
1356 return -EINVAL;
1357
Brian Gixfdd38922011-09-28 16:23:48 -07001358 if (ev->data && ev->length) {
1359 for (i = 0; (i + 2) < ev->length; i++)
1360 if (ev->data[i+1] == 0x01) {
1361 flags = ev->data[i+2];
1362 BT_DBG("flags: %2.2x", flags);
1363 break;
1364 } else {
1365 i += ev->data[i];
1366 }
1367 }
1368
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001369 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001370 /* Only new entries should be added to adv_entries. So, if
1371 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001372 if (entry) {
1373 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001374 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001375 }
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001376
1377 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1378 if (!entry)
1379 return -ENOMEM;
1380
1381 bacpy(&entry->bdaddr, &ev->bdaddr);
1382 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001383 entry->flags = flags;
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001384
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001385 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001386 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001387 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001388
1389 return 0;
1390}
1391
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001392static struct crypto_blkcipher *alloc_cypher(void)
1393{
1394 if (enable_smp)
1395 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1396
1397 return ERR_PTR(-ENOTSUPP);
1398}
1399
/* Register HCI device */
/* Make a driver-allocated hci_dev visible to the stack: assign an id
 * and name, initialize all queues/locks/timers/work items, create the
 * per-device workqueue, sysfs entry and rfkill switch, then kick off
 * the deferred power-on. Returns the assigned id or a negative errno. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* BR/EDR controllers get ids from 0, AMP controllers from 1. */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP cipher failure is non-fatal: LE pairing is simply degraded. */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill failure is non-fatal as well: run without a kill switch. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device up in the background; auto-off unless claimed. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1519
/* Unregister HCI device */
/* Tear down a registered device: unlink it from the global list, close
 * it, stop timers, release rfkill/sysfs/workqueue resources, purge all
 * per-device caches and drop the registration reference. Returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up BR/EDR controllers. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mgmt_index_removed(hdev->id);

	/* tfm may hold an ERR_PTR when SMP was disabled at register time. */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1571
/* Suspend HCI device */
/* Notify stack listeners that a driver is suspending its device.
 * Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1579
/* Resume HCI device */
/* Notify stack listeners that a driver resumed its device.
 * Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1587
/* Receive frame from HCI drivers */
/* Entry point for complete HCI frames coming up from a driver; skb->dev
 * must point at the owning hci_dev. Drops the frame (-ENXIO) unless the
 * device is up or initializing; otherwise stamps it and hands it to the
 * rx tasklet. Ownership of @skb passes to this function in all cases. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1611
/* Incrementally reassemble one HCI packet of @type from a driver byte
 * stream, using hdev->reassembly[index] as the in-progress buffer.
 *
 * Consumes bytes from @data until either the packet completes (it is
 * handed to hci_recv_frame()) or @count is exhausted.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno: -EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload exceeds the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new packet: size the buffer for the worst case of
		 * this packet type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			/* hci_recv_frame() takes ownership of skb. */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1720
Marcel Holtmannef222012007-07-11 06:42:04 +02001721int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1722{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301723 int rem = 0;
1724
Marcel Holtmannef222012007-07-11 06:42:04 +02001725 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1726 return -EILSEQ;
1727
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001728 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001729 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301730 if (rem < 0)
1731 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001732
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301733 data += (count - rem);
1734 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001735 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001736
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301737 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001738}
1739EXPORT_SYMBOL(hci_recv_fragment);
1740
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (e.g. from a UART driver) into the stream
 * reassembly slot. The first byte of each packet carries the H4 packet
 * type indicator, which is peeled off before reassembly continues.
 * Returns the last reassembly residue (>= 0) or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
						STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776/* ---- Interface to upper protocols ---- */
1777
1778/* Register/Unregister protocols.
1779 * hci_task_lock is used to ensure that no tasks are running. */
1780int hci_register_proto(struct hci_proto *hp)
1781{
1782 int err = 0;
1783
1784 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1785
1786 if (hp->id >= HCI_MAX_PROTO)
1787 return -EINVAL;
1788
1789 write_lock_bh(&hci_task_lock);
1790
1791 if (!hci_proto[hp->id])
1792 hci_proto[hp->id] = hp;
1793 else
1794 err = -EEXIST;
1795
1796 write_unlock_bh(&hci_task_lock);
1797
1798 return err;
1799}
1800EXPORT_SYMBOL(hci_register_proto);
1801
1802int hci_unregister_proto(struct hci_proto *hp)
1803{
1804 int err = 0;
1805
1806 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808 if (hp->id >= HCI_MAX_PROTO)
1809 return -EINVAL;
1810
1811 write_lock_bh(&hci_task_lock);
1812
1813 if (hci_proto[hp->id])
1814 hci_proto[hp->id] = NULL;
1815 else
1816 err = -ENOENT;
1817
1818 write_unlock_bh(&hci_task_lock);
1819
1820 return err;
1821}
1822EXPORT_SYMBOL(hci_unregister_proto);
1823
/* Add @cb to the global list of HCI event callbacks. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1835
/* Remove a callback structure from hci_cb_list under the _bh write
 * lock. The entry is assumed to have been registered. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001848int hci_register_amp(struct amp_mgr_cb *cb)
1849{
1850 BT_DBG("%p", cb);
1851
1852 write_lock_bh(&amp_mgr_cb_list_lock);
1853 list_add(&cb->list, &amp_mgr_cb_list);
1854 write_unlock_bh(&amp_mgr_cb_list_lock);
1855
1856 return 0;
1857}
1858EXPORT_SYMBOL(hci_register_amp);
1859
/* Remove an AMP manager callback set from amp_mgr_cb_list under the
 * _bh write lock. Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1871
/* Fan out an HCI command-complete event for @opcode to every registered
 * AMP manager that installed an amp_cmd_complete_event hook. The skb is
 * not consumed here; ownership stays with the caller. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
				struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1886
/* Fan out an HCI command-status event (@opcode, @status) to every
 * registered AMP manager that installed an amp_cmd_status_event hook. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1900
/* Fan out a raw AMP HCI event (@ev_code) to every registered AMP
 * manager that installed an amp_event hook. The skb is not consumed
 * here; ownership stays with the caller. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
				struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1915
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916static int hci_send_frame(struct sk_buff *skb)
1917{
1918 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1919
1920 if (!hdev) {
1921 kfree_skb(skb);
1922 return -ENODEV;
1923 }
1924
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001925 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927 if (atomic_read(&hdev->promisc)) {
1928 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001929 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001931 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 }
1933
1934 /* Get rid of skb owner, prior to sending to the driver. */
1935 skb_orphan(skb);
1936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937 hci_notify(hdev, HCI_DEV_WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 return hdev->send(skb);
1939}
1940
1941/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001942int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943{
1944 int len = HCI_COMMAND_HDR_SIZE + plen;
1945 struct hci_command_hdr *hdr;
1946 struct sk_buff *skb;
1947
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001948 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 skb = bt_skb_alloc(len, GFP_ATOMIC);
1951 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001952 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return -ENOMEM;
1954 }
1955
1956 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001957 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 hdr->plen = plen;
1959
1960 if (plen)
1961 memcpy(skb_put(skb, plen), param, plen);
1962
1963 BT_DBG("skb len %d", skb->len);
1964
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001965 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001967
Johan Hedberga5040ef2011-01-10 13:28:59 +02001968 if (test_bit(HCI_INIT, &hdev->flags))
1969 hdev->init_last_cmd = opcode;
1970
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001972 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973
1974 return 0;
1975}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
1978/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001979void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
1981 struct hci_command_hdr *hdr;
1982
1983 if (!hdev->sent_cmd)
1984 return NULL;
1985
1986 hdr = (void *) hdev->sent_cmd->data;
1987
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001988 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 return NULL;
1990
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001991 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
1993 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1994}
1995
1996/* Send ACL data */
1997static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1998{
1999 struct hci_acl_hdr *hdr;
2000 int len = skb->len;
2001
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002002 skb_push(skb, HCI_ACL_HDR_SIZE);
2003 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002004 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002005 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2006 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007}
2008
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
2010 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011{
2012 struct hci_dev *hdev = conn->hdev;
2013 struct sk_buff *list;
2014
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015 BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002018 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 if (hdev->dev_type == HCI_BREDR)
2020 hci_add_acl_hdr(skb, conn->handle, flags);
2021 else
2022 hci_add_acl_hdr(skb, chan->ll_handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002024 list = skb_shinfo(skb)->frag_list;
2025 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 /* Non fragmented */
2027 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2028
2029 skb_queue_tail(&conn->data_q, skb);
2030 } else {
2031 /* Fragmented */
2032 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2033
2034 skb_shinfo(skb)->frag_list = NULL;
2035
2036 /* Queue all fragments atomically */
2037 spin_lock_bh(&conn->data_q.lock);
2038
2039 __skb_queue_tail(&conn->data_q, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040 flags &= ~ACL_PB_MASK;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002041 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 do {
2043 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002044
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002046 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002047 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
2049 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2050
2051 __skb_queue_tail(&conn->data_q, skb);
2052 } while (list);
2053
2054 spin_unlock_bh(&conn->data_q.lock);
2055 }
2056
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002057 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058}
2059EXPORT_SYMBOL(hci_send_acl);
2060
2061/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002062void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063{
2064 struct hci_dev *hdev = conn->hdev;
2065 struct hci_sco_hdr hdr;
2066
2067 BT_DBG("%s len %d", hdev->name, skb->len);
2068
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002069 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 hdr.dlen = skb->len;
2071
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002072 skb_push(skb, HCI_SCO_HDR_SIZE);
2073 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002074 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
2076 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002077 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002080 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081}
2082EXPORT_SYMBOL(hci_send_sco);
2083
2084/* ---- HCI TX task (outgoing data) ---- */
2085
2086/* HCI Connection scheduler */
2087static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2088{
2089 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02002090 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 int num = 0, min = ~0;
2092 struct list_head *p;
2093
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002094 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 * added and removed with TX task disabled. */
2096 list_for_each(p, &h->list) {
2097 struct hci_conn *c;
2098 c = list_entry(p, struct hci_conn, list);
2099
Marcel Holtmann769be972008-07-14 20:13:49 +02002100 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002102
2103 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2104 continue;
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 num++;
2107
2108 if (c->sent < min) {
2109 min = c->sent;
2110 conn = c;
2111 }
2112 }
2113
2114 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002115 int cnt, q;
2116
2117 switch (conn->type) {
2118 case ACL_LINK:
2119 cnt = hdev->acl_cnt;
2120 break;
2121 case SCO_LINK:
2122 case ESCO_LINK:
2123 cnt = hdev->sco_cnt;
2124 break;
2125 case LE_LINK:
2126 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2127 break;
2128 default:
2129 cnt = 0;
2130 BT_ERR("Unknown link type");
2131 }
2132
2133 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 *quote = q ? q : 1;
2135 } else
2136 *quote = 0;
2137
2138 BT_DBG("conn %p quote %d", conn, *quote);
2139 return conn;
2140}
2141
Ville Tervobae1f5d2011-02-10 22:38:53 -03002142static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
2144 struct hci_conn_hash *h = &hdev->conn_hash;
2145 struct list_head *p;
2146 struct hci_conn *c;
2147
Ville Tervobae1f5d2011-02-10 22:38:53 -03002148 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 /* Kill stalled connections */
2151 list_for_each(p, &h->list) {
2152 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002153 if (c->type == type && c->sent) {
2154 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 hdev->name, batostr(&c->dst));
2156 hci_acl_disconn(c, 0x13);
2157 }
2158 }
2159}
2160
2161static inline void hci_sched_acl(struct hci_dev *hdev)
2162{
2163 struct hci_conn *conn;
2164 struct sk_buff *skb;
2165 int quote;
2166
2167 BT_DBG("%s", hdev->name);
2168
2169 if (!test_bit(HCI_RAW, &hdev->flags)) {
2170 /* ACL tx timeout must be longer than maximum
2171 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002172 if (hdev->acl_cnt <= 0 &&
2173 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002174 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 }
2176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002177 while (hdev->acl_cnt > 0 &&
2178 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2179 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2180 int count = 1;
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002184 if (hdev->flow_ctl_mode ==
2185 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2186 /* Calculate count of blocks used by
2187 * this packet
2188 */
2189 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2190 hdev->data_block_len) + 1;
2191
2192 if (count > hdev->acl_cnt)
2193 return;
2194
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002195 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 hci_send_frame(skb);
2198 hdev->acl_last_tx = jiffies;
2199
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002200 hdev->acl_cnt -= count;
2201 quote -= count;
2202
2203 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 }
2205 }
2206}
2207
2208/* Schedule SCO */
2209static inline void hci_sched_sco(struct hci_dev *hdev)
2210{
2211 struct hci_conn *conn;
2212 struct sk_buff *skb;
2213 int quote;
2214
2215 BT_DBG("%s", hdev->name);
2216
2217 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2218 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2219 BT_DBG("skb %p len %d", skb, skb->len);
2220 hci_send_frame(skb);
2221
2222 conn->sent++;
2223 if (conn->sent == ~0)
2224 conn->sent = 0;
2225 }
2226 }
2227}
2228
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002229static inline void hci_sched_esco(struct hci_dev *hdev)
2230{
2231 struct hci_conn *conn;
2232 struct sk_buff *skb;
2233 int quote;
2234
2235 BT_DBG("%s", hdev->name);
2236
2237 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2238 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2239 BT_DBG("skb %p len %d", skb, skb->len);
2240 hci_send_frame(skb);
2241
2242 conn->sent++;
2243 if (conn->sent == ~0)
2244 conn->sent = 0;
2245 }
2246 }
2247}
2248
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002249static inline void hci_sched_le(struct hci_dev *hdev)
2250{
2251 struct hci_conn *conn;
2252 struct sk_buff *skb;
2253 int quote, cnt;
2254
2255 BT_DBG("%s", hdev->name);
2256
2257 if (!test_bit(HCI_RAW, &hdev->flags)) {
2258 /* LE tx timeout must be longer than maximum
2259 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03002260 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002261 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002262 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002263 }
2264
2265 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2266 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2267 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2268 BT_DBG("skb %p len %d", skb, skb->len);
2269
2270 hci_send_frame(skb);
2271 hdev->le_last_tx = jiffies;
2272
2273 cnt--;
2274 conn->sent++;
2275 }
2276 }
2277 if (hdev->le_pkts)
2278 hdev->le_cnt = cnt;
2279 else
2280 hdev->acl_cnt = cnt;
2281}
2282
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283static void hci_tx_task(unsigned long arg)
2284{
2285 struct hci_dev *hdev = (struct hci_dev *) arg;
2286 struct sk_buff *skb;
2287
2288 read_lock(&hci_task_lock);
2289
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002290 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2291 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
2293 /* Schedule queues and send stuff to HCI driver */
2294
2295 hci_sched_acl(hdev);
2296
2297 hci_sched_sco(hdev);
2298
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002299 hci_sched_esco(hdev);
2300
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002301 hci_sched_le(hdev);
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 /* Send next queued raw (unknown type) packet */
2304 while ((skb = skb_dequeue(&hdev->raw_q)))
2305 hci_send_frame(skb);
2306
2307 read_unlock(&hci_task_lock);
2308}
2309
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002310/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311
2312/* ACL data packet */
2313static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2314{
2315 struct hci_acl_hdr *hdr = (void *) skb->data;
2316 struct hci_conn *conn;
2317 __u16 handle, flags;
2318
2319 skb_pull(skb, HCI_ACL_HDR_SIZE);
2320
2321 handle = __le16_to_cpu(hdr->handle);
2322 flags = hci_flags(handle);
2323 handle = hci_handle(handle);
2324
2325 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2326
2327 hdev->stat.acl_rx++;
2328
2329 hci_dev_lock(hdev);
2330 conn = hci_conn_hash_lookup_handle(hdev, handle);
2331 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002332
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 if (conn) {
2334 register struct hci_proto *hp;
2335
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002336 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002337
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002339 hp = hci_proto[HCI_PROTO_L2CAP];
2340 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 hp->recv_acldata(conn, skb, flags);
2342 return;
2343 }
2344 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002345 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 hdev->name, handle);
2347 }
2348
2349 kfree_skb(skb);
2350}
2351
2352/* SCO data packet */
2353static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2354{
2355 struct hci_sco_hdr *hdr = (void *) skb->data;
2356 struct hci_conn *conn;
2357 __u16 handle;
2358
2359 skb_pull(skb, HCI_SCO_HDR_SIZE);
2360
2361 handle = __le16_to_cpu(hdr->handle);
2362
2363 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2364
2365 hdev->stat.sco_rx++;
2366
2367 hci_dev_lock(hdev);
2368 conn = hci_conn_hash_lookup_handle(hdev, handle);
2369 hci_dev_unlock(hdev);
2370
2371 if (conn) {
2372 register struct hci_proto *hp;
2373
2374 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002375 hp = hci_proto[HCI_PROTO_SCO];
2376 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002377 hp->recv_scodata(conn, skb);
2378 return;
2379 }
2380 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002381 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382 hdev->name, handle);
2383 }
2384
2385 kfree_skb(skb);
2386}
2387
Marcel Holtmann65164552005-10-28 19:20:48 +02002388static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002389{
2390 struct hci_dev *hdev = (struct hci_dev *) arg;
2391 struct sk_buff *skb;
2392
2393 BT_DBG("%s", hdev->name);
2394
2395 read_lock(&hci_task_lock);
2396
2397 while ((skb = skb_dequeue(&hdev->rx_q))) {
2398 if (atomic_read(&hdev->promisc)) {
2399 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002400 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 }
2402
2403 if (test_bit(HCI_RAW, &hdev->flags)) {
2404 kfree_skb(skb);
2405 continue;
2406 }
2407
2408 if (test_bit(HCI_INIT, &hdev->flags)) {
2409 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002410 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411 case HCI_ACLDATA_PKT:
2412 case HCI_SCODATA_PKT:
2413 kfree_skb(skb);
2414 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002415 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 }
2417
2418 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002419 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 case HCI_EVENT_PKT:
2421 hci_event_packet(hdev, skb);
2422 break;
2423
2424 case HCI_ACLDATA_PKT:
2425 BT_DBG("%s ACL data packet", hdev->name);
2426 hci_acldata_packet(hdev, skb);
2427 break;
2428
2429 case HCI_SCODATA_PKT:
2430 BT_DBG("%s SCO data packet", hdev->name);
2431 hci_scodata_packet(hdev, skb);
2432 break;
2433
2434 default:
2435 kfree_skb(skb);
2436 break;
2437 }
2438 }
2439
2440 read_unlock(&hci_task_lock);
2441}
2442
/* Command tasklet: transmit the next queued HCI command if the
 * controller has a free command slot (cmd_cnt > 0). A clone of the
 * frame is kept in hdev->sent_cmd so hci_sent_cmd_data() can inspect it
 * after the completion event; the command timer guards against a
 * controller that never responds. On clone-allocation failure the
 * command is requeued and the tasklet rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002470
/* Expose the enable_smp flag (defined elsewhere in this file) as a
 * module parameter, writable at runtime via sysfs (mode 0644). */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");