blob: b47ae16f300827a8a16da99db2bb0e8efaeb3349 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an adapter that was auto-powered on is turned off again. */
#define AUTO_OFF_TIMEOUT	2000

/* Forward declarations for the tasklet handlers defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the rx/tx/cmd tasklets against concurrent protocol changes. */
static DEFINE_RWLOCK(hci_task_lock);

/* Module-level switch for Security Manager Protocol support (default on). */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols (slots for e.g. L2CAP/SCO protocol handlers) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
/* Register a callback for HCI device events (HCI_DEV_UP/DOWN etc.).
 * Thin wrapper around the atomic notifier chain. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
90
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
95
/* Broadcast @event for @hdev to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
100
101/* ---- HCI requests ---- */
102
/* Called when an HCI command completes: record @result and wake the
 * request waiter in __hci_request(), if one is pending. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only one request can be outstanding; hand it the result and
	 * wake the sleeper before it times out. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps interruptibly for up
 * to @timeout jiffies until hci_req_complete()/hci_req_cancel() wakes us.
 * Returns 0 on success, a negative errno mapped from the HCI status on
 * failure, -ETIMEDOUT on timeout, or -EINTR when a signal interrupted
 * the wait. Caller must hold the request lock (see hci_request()). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves BEFORE issuing the request so a fast completion
	 * cannot be missed between req() and schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status == HCI_REQ_PEND;
	 * presumably a later completion still finds it pending — confirm. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller's HCI status code to a negative errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno from the canceller. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
173
/* Public wrapper for __hci_request(): rejects devices that are not up
 * and serializes all synchronous requests via the per-device req lock. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
189
/* Request callback: issue an HCI Reset and flag it in hdev->flags so the
 * event handler knows a reset is in flight. @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
198
/* Request callback run at device bring-up: queue the mandatory HCI init
 * command sequence. BR/EDR controllers get the full feature/identity
 * reads plus event-filter and stored-link-key cleanup; AMP controllers
 * get a shorter sequence ending in Read Local AMP Info. Command order
 * is deliberate and must not be rearranged. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-supplied pre-init commands
	 * into the command queue first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Set default HCI Flow Control Mode: packet-based for BR/EDR,
	 * block-based for AMP controllers. */
	if (hdev->dev_type == HCI_BREDR)
		hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
	else
		hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;

	/* Read HCI Flow Control Mode */
	hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Data Block Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	if (hdev->dev_type == HCI_BREDR) {
		/* BR-EDR initialization */

		/* Read Local Supported Features */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

		/* Read BD Address */
		hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

		/* Read Class of Device */
		hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

		/* Read Local Name */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

		/* Read Voice Setting */
		hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

		/* Optional initialization */
		/* Clear Event Filters */
		flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

		/* Connection accept timeout ~20 secs (0x7d00 slots) */
		param = cpu_to_le16(0x7d00);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Flush all stored link keys from the controller. */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 1;
		hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				sizeof(cp), &cp);
	} else {
		/* AMP initialization */
		/* Connection accept timeout ~5 secs (0x1f40 slots) */
		param = cpu_to_le16(0x1f40);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Read AMP Info */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
	}
}
300
/* Request callback for LE-capable controllers: query the LE ACL buffer
 * parameters. @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 scan = opt;
312
313 BT_DBG("%s %x", hdev->name, scan);
314
315 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
/* Request callback: enable/disable authentication; the low byte of @opt
 * is the HCI auth-enable value. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
328
/* Request callback: enable/disable link-level encryption; the low byte
 * of @opt is the HCI encrypt-mode value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
338
/* Request callback: set the default link policy; @opt holds the policy
 * bitmask, converted to little-endian wire order here. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
348
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900349/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 * Device is held on return. */
351struct hci_dev *hci_dev_get(int index)
352{
353 struct hci_dev *hdev = NULL;
354 struct list_head *p;
355
356 BT_DBG("%d", index);
357
358 if (index < 0)
359 return NULL;
360
361 read_lock(&hci_dev_list_lock);
362 list_for_each(p, &hci_dev_list) {
363 struct hci_dev *d = list_entry(p, struct hci_dev, list);
364 if (d->id == index) {
365 hdev = hci_dev_hold(d);
366 break;
367 }
368 }
369 read_unlock(&hci_dev_list_lock);
370 return hdev;
371}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374/* ---- Inquiry support ---- */
375static void inquiry_cache_flush(struct hci_dev *hdev)
376{
377 struct inquiry_cache *cache = &hdev->inq_cache;
378 struct inquiry_entry *next = cache->list, *e;
379
380 BT_DBG("cache %p", cache);
381
382 cache->list = NULL;
383 while ((e = next)) {
384 next = e->next;
385 kfree(e);
386 }
387}
388
389struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
392 struct inquiry_entry *e;
393
394 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
395
396 for (e = cache->list; e; e = e->next)
397 if (!bacmp(&e->data.bdaddr, bdaddr))
398 break;
399 return e;
400}
401
/* Insert or refresh the inquiry-cache entry for @data->bdaddr and stamp
 * both the entry and the cache with the current jiffies. Allocation is
 * GFP_ATOMIC (runs from event context); failure silently drops the
 * update. Caller must hold the device lock. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one at the list head. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
424
/* Serialize up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries written. Must not
 * sleep; caller holds the device lock (see hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		/* Class of Device is a 3-byte field. */
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
446
/* Request callback: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via @opt. No-op if an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
463
/* HCIINQUIRY ioctl backend: run (or reuse) an inquiry and copy the
 * cached results back to userspace after the updated hci_inquiry_req
 * header. Returns 0 or a negative errno (-EFAULT/-ENODEV/-ENOMEM or a
 * request error). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Start a fresh inquiry only when the cache is stale, empty, or
	 * the caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s inquiry units; ~2000ms per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with actual num_rsp) then the entries. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
529
530/* ---- HCI ioctl helpers ---- */
531
/* HCIDEVUP backend: power on device @dev. Rejects rfkill-blocked and
 * already-up devices, runs the HCI init sequence (plus the LE sequence
 * when the controller is LE capable), and on failure tears everything
 * back down. Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Driver-level open (transport bring-up). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): an LE init failure overwrites a BR/EDR init
		 * success/failure in ret — confirm this is intended. */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
611
/* Power down @hdev: cancel any pending request, stop the tasklets,
 * flush caches/queues, best-effort HCI Reset, then driver close. Safe
 * to call on an already-down device (returns 0 early). The teardown
 * ordering below is load-bearing — do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device (short 250ms timeout: best effort on the way down) */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (hdev->dev_type == HCI_BREDR)
		mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
679
680int hci_dev_close(__u16 dev)
681{
682 struct hci_dev *hdev;
683 int err;
684
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200685 hdev = hci_dev_get(dev);
686 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687 return -ENODEV;
688 err = hci_dev_do_close(hdev);
689 hci_dev_put(hdev);
690 return err;
691}
692
/* HCIDEVRESET backend: flush queues/caches/connections of device @dev
 * and issue an HCI Reset (unless the device is raw). TX is paused for
 * the duration. Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control credit counters to their idle state. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
734int hci_dev_reset_stat(__u16 dev)
735{
736 struct hci_dev *hdev;
737 int ret = 0;
738
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200739 hdev = hci_dev_get(dev);
740 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 return -ENODEV;
742
743 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
744
745 hci_dev_put(hdev);
746
747 return ret;
748}
749
/* Dispatcher for the HCISET* ioctls: copy the hci_dev_req from user
 * space and apply the requested setting to the device. Returns 0 or a
 * negative errno (-EFAULT/-ENODEV/-EOPNOTSUPP/-EINVAL or request error). */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side setting only; no controller round-trip. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs pkts in the low 16 bits, mtu in the high. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
824
/* HCIGETDEVLIST backend: copy the (id, flags) pair of up to dev_num
 * registered devices to userspace. Also cancels pending auto-off timers
 * and marks non-mgmt devices pairable as a side effect of enumeration.
 * Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
874
/* HCIGETDEVINFO backend: fill a hci_dev_info snapshot for the requested
 * device and copy it back to userspace. Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device cancels any pending auto-off. */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
915
916/* ---- Interface to HCI drivers ---- */
917
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200918static int hci_rfkill_set_block(void *data, bool blocked)
919{
920 struct hci_dev *hdev = data;
921
922 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
923
924 if (!blocked)
925 return 0;
926
927 hci_dev_do_close(hdev);
928
929 return 0;
930}
931
/* rfkill operations: only blocking is acted upon (see
 * hci_rfkill_set_block above); no query/unblock hooks are needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
935
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936/* Alloc HCI device */
937struct hci_dev *hci_alloc_dev(void)
938{
939 struct hci_dev *hdev;
940
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200941 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942 if (!hdev)
943 return NULL;
944
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 skb_queue_head_init(&hdev->driver_init);
946
947 return hdev;
948}
949EXPORT_SYMBOL(hci_alloc_dev);
950
/* Free an HCI device allocated with hci_alloc_dev().
 *
 * Any driver-init skbs still queued are dropped; the struct itself is
 * released through the embedded device's release callback once the
 * last sysfs reference goes away.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
960
/* Workqueue handler: power on a newly registered (or mgmt-requested)
 * controller.
 *
 * Opens the device; bails out if the open failed and the device did not
 * come up by other means. For BR/EDR controllers it then arms the
 * auto-power-off timer (if still in AUTO_OFF) and announces the new
 * index to the management interface once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0 && !test_bit(HCI_UP, &hdev->flags))
		return;

	/* Power back off automatically unless something claims the device
	 * before the timeout expires. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: tell mgmt about us. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
979
/* Workqueue handler: power the controller off (scheduled from the
 * auto-off timer, see hci_auto_off()). Runs in process context so it
 * may sleep inside hci_dev_close(). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
988
/* off_timer callback: the auto-off grace period expired with nobody
 * claiming the device. Clears AUTO_OFF and defers the actual close to
 * the workqueue, since a timer (softirq) context cannot sleep. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
999
/* Cancel the pending auto-power-off: clear the flag and stop the timer.
 * Called whenever userspace shows interest in the device. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1007
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001008int hci_uuids_clear(struct hci_dev *hdev)
1009{
1010 struct list_head *p, *n;
1011
1012 list_for_each_safe(p, n, &hdev->uuids) {
1013 struct bt_uuid *uuid;
1014
1015 uuid = list_entry(p, struct bt_uuid, list);
1016
1017 list_del(p);
1018 kfree(uuid);
1019 }
1020
1021 return 0;
1022}
1023
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001024int hci_link_keys_clear(struct hci_dev *hdev)
1025{
1026 struct list_head *p, *n;
1027
1028 list_for_each_safe(p, n, &hdev->link_keys) {
1029 struct link_key *key;
1030
1031 key = list_entry(p, struct link_key, list);
1032
1033 list_del(p);
1034 kfree(key);
1035 }
1036
1037 return 0;
1038}
1039
1040struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1041{
1042 struct list_head *p;
1043
1044 list_for_each(p, &hdev->link_keys) {
1045 struct link_key *k;
1046
1047 k = list_entry(p, struct link_key, list);
1048
1049 if (bacmp(bdaddr, &k->bdaddr) == 0)
1050 return k;
1051 }
1052
1053 return NULL;
1054}
1055
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001056struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1057{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001058 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 list_for_each(p, &hdev->link_keys) {
1061 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001062 struct key_master_id *id;
1063
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001064 k = list_entry(p, struct link_key, list);
1065
1066 if (k->type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001067 continue;
1068
1069 if (k->dlen != sizeof(*id))
1070 continue;
1071
1072 id = (void *) &k->data;
1073 if (id->ediv == ediv &&
1074 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1075 return k;
1076 }
1077
1078 return NULL;
1079}
1080EXPORT_SYMBOL(hci_find_ltk);
1081
1082struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1083 bdaddr_t *bdaddr, u8 type)
1084{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001085 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001087 list_for_each(p, &hdev->link_keys) {
1088 struct link_key *k;
1089
1090 k = list_entry(p, struct link_key, list);
1091
1092 if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001093 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001094 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001095
1096 return NULL;
1097}
1098EXPORT_SYMBOL(hci_find_link_key_type);
1099
/* Store (or refresh) a classic link key for @bdaddr.
 *
 * @new_key: non-zero when the key arrived fresh from the controller,
 *           in which case the management interface is notified.
 * @val:     16-byte key value. @type is the HCI link-key type and
 *           @pin_len the PIN length used during pairing.
 *
 * Returns 0 on success or -ENOMEM if a new entry could not be
 * allocated. Existing entries for the same address are updated in
 * place.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key of any type". */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->type = type;
	key->pin_len = pin_len;

	/* Infer bonding from the live connection's auth requirements:
	 * anything above 0x01 (i.e. beyond "no bonding") counts. */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);

	if (conn) {
		if (conn->remote_auth > 0x01)
			bonded = 1;
		else if (conn->auth_initiator && conn->auth_type > 0x01)
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* Type 0x06 (NOTE(review): presumably "changed combination key" —
	 * confirm against the HCI spec) should not overwrite the stored
	 * key's classification, so restore the previous type. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1145
/* Store (or refresh) an SMP Long Term Key for @bdaddr.
 *
 * The LTK is kept on the shared link_keys list as a KEY_TYPE_LTK entry
 * with a struct key_master_id (ediv + rand) in its trailing data.
 * @key_size is stored in pin_len; @auth carries the authentication
 * properties (bit 0 appears to flag a bonded/authenticated key — see
 * the mgmt_new_key call below).
 *
 * Returns 0 on success or -ENOMEM if a new entry could not be
 * allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, u8 auth, __le16 ediv, u8 rand[8],
			u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s", hdev->name, auth, batostr(bdaddr));

	/* Only one LTK per peer is kept; update it in place if found. */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		/* Extra room for the master id in the flexible data[]. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = KEY_TYPE_LTK;
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1182
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001183int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1184{
1185 struct link_key *key;
1186
1187 key = hci_find_link_key(hdev, bdaddr);
1188 if (!key)
1189 return -ENOENT;
1190
1191 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1192
1193 list_del(&key->list);
1194 kfree(key);
1195
1196 return 0;
1197}
1198
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time.
 * Resets the command credit to 1 and clears any stuck HCI_RESET so the
 * command tasklet can make forward progress with the next queued
 * command. Runs in timer (softirq) context. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1209
/* Look up stored remote Out-Of-Band pairing data for @bdaddr.
 * Returns the entry or NULL; the entry stays owned by the
 * remote_oob_data list. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1221
1222int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1223{
1224 struct oob_data *data;
1225
1226 data = hci_find_remote_oob_data(hdev, bdaddr);
1227 if (!data)
1228 return -ENOENT;
1229
1230 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1231
1232 list_del(&data->list);
1233 kfree(data);
1234
1235 return 0;
1236}
1237
/* Remove and free all stored remote OOB pairing data on @hdev.
 * Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1249
/* adv_timer callback: drop all cached LE advertising entries once they
 * have gone stale. Timer context; the clear routine uses _bh locking. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;

	hci_adv_entries_clear(hdev);
}
1256
1257int hci_adv_entries_clear(struct hci_dev *hdev)
1258{
1259 struct list_head *p, *n;
1260
Brian Gixa68668b2011-08-11 15:49:36 -07001261 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001262 write_lock_bh(&hdev->adv_entries_lock);
1263
1264 list_for_each_safe(p, n, &hdev->adv_entries) {
1265 struct adv_entry *entry;
1266
1267 entry = list_entry(p, struct adv_entry, list);
1268
1269 list_del(p);
1270 kfree(entry);
1271 }
1272
1273 write_unlock_bh(&hdev->adv_entries_lock);
1274
1275 return 0;
1276}
1277
1278struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1279{
1280 struct list_head *p;
1281 struct adv_entry *res = NULL;
1282
Brian Gixa68668b2011-08-11 15:49:36 -07001283 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001284 read_lock_bh(&hdev->adv_entries_lock);
1285
1286 list_for_each(p, &hdev->adv_entries) {
1287 struct adv_entry *entry;
1288
1289 entry = list_entry(p, struct adv_entry, list);
1290
1291 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1292 res = entry;
1293 goto out;
1294 }
1295 }
1296out:
1297 read_unlock_bh(&hdev->adv_entries_lock);
1298 return res;
1299}
1300
1301static inline int is_connectable_adv(u8 evt_type)
1302{
1303 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1304 return 1;
1305
1306 return 0;
1307}
1308
/* Store (or refresh) remote Out-Of-Band pairing data for @bdaddr.
 *
 * @hash and @randomizer are the OOB values received out of band; their
 * lengths are fixed by the oob_data struct fields they are copied into.
 *
 * Returns 0 on success or -ENOMEM if a new entry could not be
 * allocated. Existing entries for the same address are overwritten.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* hash/randomizer are (re)written for both new and existing
	 * entries — newest OOB values win. */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1332
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001333int hci_add_adv_entry(struct hci_dev *hdev,
1334 struct hci_ev_le_advertising_info *ev)
1335{
1336 struct adv_entry *entry;
1337
Brian Gixa68668b2011-08-11 15:49:36 -07001338 BT_DBG("");
1339
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001340 if (!is_connectable_adv(ev->evt_type))
1341 return -EINVAL;
1342
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001343 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001344 /* Only new entries should be added to adv_entries. So, if
1345 * bdaddr was found, don't add it. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001346 if (entry)
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001347 return 0;
1348
1349 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1350 if (!entry)
1351 return -ENOMEM;
1352
1353 bacpy(&entry->bdaddr, &ev->bdaddr);
1354 entry->bdaddr_type = ev->bdaddr_type;
1355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001356 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001357 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001358 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001359
1360 return 0;
1361}
1362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363static struct crypto_blkcipher *alloc_cypher(void)
1364{
1365 if (enable_smp)
1366 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1367
1368 return ERR_PTR(-ENOTSUPP);
1369}
1370
/* Register HCI device */
/* Register a driver-allocated HCI device with the core.
 *
 * Picks the first free hciN id, initializes all core state (tasklets,
 * queues, timers, key/UUID/OOB/adv lists, workqueue, sysfs, rfkill),
 * schedules the initial power-on and returns the assigned id.
 *
 * Returns the non-negative device id on success, -EINVAL when the
 * driver did not supply the mandatory callbacks, -ENOMEM when the
 * workqueue could not be created.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide open/close/destruct callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert sorted: after the last node whose id matched. */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	/* Sniff intervals in baseband slots (0.625 ms units). */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP cipher is optional: failure is logged, not fatal. */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill is optional as well: registration failure just leaves
	 * the device without an rfkill switch. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices power on for setup and auto-off if unused. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1488
/* Unregister HCI device */
/* Tear down a registered HCI device: remove it from the global list,
 * close it, notify mgmt (for BR/EDR devices past setup), release the
 * cipher, rfkill and sysfs entries, stop timers, destroy the
 * workqueue, clear all per-device lists and drop the core's reference.
 * Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices mgmt already knew about. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR)
		mgmt_index_removed(hdev->id);

	/* tfm may hold an ERR_PTR when SMP is disabled (alloc_cypher). */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1540
/* Suspend HCI device */
/* Broadcast a suspend notification for @hdev to registered notifiers.
 * Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1548
/* Resume HCI device */
/* Broadcast a resume notification for @hdev to registered notifiers.
 * Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1556
/* Receive frame from HCI drivers */
/* Entry point for drivers handing a complete HCI frame to the core.
 *
 * The skb's dev field must point at the owning hci_dev. Frames are
 * accepted only while the device is UP or being initialized; otherwise
 * the skb is freed and -ENXIO is returned. Accepted frames are
 * timestamped, marked incoming and queued for the rx tasklet.
 * Returns 0 on success.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incomming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1580
/* Incrementally reassemble one HCI packet from driver-supplied bytes.
 *
 * @type:  packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT).
 * @data:  next chunk of raw bytes; @count bytes available.
 * @index: which hdev->reassembly[] slot holds the in-progress skb
 *         (per-type for packet transports, STREAM_REASSEMBLY for
 *         byte-stream transports).
 *
 * Consumes bytes until either the input runs out or a full packet is
 * completed (header first to learn the payload length, then payload).
 * A completed packet is dispatched via hci_recv_frame() and the slot
 * is cleared.
 *
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for a
 * bad type/index, or -ENOMEM on allocation failure (the partial skb is
 * dropped).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate one sized for the
		 * largest frame of this type, expecting its header. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * finish the current stage (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Learn the payload length from
		 * it and make sure the skb can hold that much. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1689
Marcel Holtmannef222012007-07-11 06:42:04 +02001690int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1691{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301692 int rem = 0;
1693
Marcel Holtmannef222012007-07-11 06:42:04 +02001694 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1695 return -EILSEQ;
1696
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001697 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001698 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301699 if (rem < 0)
1700 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001701
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301702 data += (count - rem);
1703 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001704 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001705
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301706 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001707}
1708EXPORT_SYMBOL(hci_recv_fragment);
1709
Suraj Sumangala99811512010-07-14 13:02:19 +05301710#define STREAM_REASSEMBLY 0
1711
1712int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1713{
1714 int type;
1715 int rem = 0;
1716
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001717 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301718 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1719
1720 if (!skb) {
1721 struct { char type; } *pkt;
1722
1723 /* Start of the frame */
1724 pkt = data;
1725 type = pkt->type;
1726
1727 data++;
1728 count--;
1729 } else
1730 type = bt_cb(skb)->pkt_type;
1731
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001732 rem = hci_reassembly(hdev, type, data, count,
1733 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301734 if (rem < 0)
1735 return rem;
1736
1737 data += (count - rem);
1738 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001739 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301740
1741 return rem;
1742}
1743EXPORT_SYMBOL(hci_recv_stream_fragment);
1744
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745/* ---- Interface to upper protocols ---- */
1746
1747/* Register/Unregister protocols.
1748 * hci_task_lock is used to ensure that no tasks are running. */
1749int hci_register_proto(struct hci_proto *hp)
1750{
1751 int err = 0;
1752
1753 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1754
1755 if (hp->id >= HCI_MAX_PROTO)
1756 return -EINVAL;
1757
1758 write_lock_bh(&hci_task_lock);
1759
1760 if (!hci_proto[hp->id])
1761 hci_proto[hp->id] = hp;
1762 else
1763 err = -EEXIST;
1764
1765 write_unlock_bh(&hci_task_lock);
1766
1767 return err;
1768}
1769EXPORT_SYMBOL(hci_register_proto);
1770
1771int hci_unregister_proto(struct hci_proto *hp)
1772{
1773 int err = 0;
1774
1775 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1776
1777 if (hp->id >= HCI_MAX_PROTO)
1778 return -EINVAL;
1779
1780 write_lock_bh(&hci_task_lock);
1781
1782 if (hci_proto[hp->id])
1783 hci_proto[hp->id] = NULL;
1784 else
1785 err = -ENOENT;
1786
1787 write_unlock_bh(&hci_task_lock);
1788
1789 return err;
1790}
1791EXPORT_SYMBOL(hci_unregister_proto);
1792
/* Add a callback structure to the global hci_cb_list (connection event
 * hooks). Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1804
/* Remove a callback structure from the global hci_cb_list. Always
 * returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1816
/* Add an AMP manager callback structure to the global list of AMP
 * event listeners. Always returns 0. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1828
/* Remove an AMP manager callback structure from the global list.
 * Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1840
/* Fan out an HCI Command Complete event for @opcode to every AMP
 * manager listener that installed an amp_cmd_complete_event hook. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
				struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1855
/* Fan out an HCI Command Status event (@opcode, @status) to every AMP
 * manager listener that installed an amp_cmd_status_event hook. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1869
/* Fan out a raw AMP HCI event (@ev_code) to every registered AMP manager
 * that installed an amp_event hook.
 * NOTE(review): as with hci_amp_cmd_complete(), the skb is not freed
 * here — ownership appears to remain with the caller; verify.
 */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1884
/* Hand a single frame to the transport driver.
 * Consumes the skb in all cases: freed here if no hdev is attached,
 * otherwise ownership passes to hdev->send(). skb->dev carries the
 * hci_dev pointer (set by the queueing paths).
 * Returns 0 or a negative errno from the driver.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* If anyone is sniffing (promiscuous raw sockets), deliver a
	 * timestamped copy to the HCI sockets first */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1909
/* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param) and queue
 * it on hdev->cmd_q; the command tasklet drains the queue.
 * Called from atomic context (GFP_ATOMIC allocation).
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Wire format: little-endian opcode, one-byte parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last command issued so the
	 * init sequence can be resumed after its completion event */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001948void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949{
1950 struct hci_command_hdr *hdr;
1951
1952 if (!hdev->sent_cmd)
1953 return NULL;
1954
1955 hdr = (void *) hdev->sent_cmd->data;
1956
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001957 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 return NULL;
1959
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001960 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961
1962 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1963}
1964
1965/* Send ACL data */
1966static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1967{
1968 struct hci_acl_hdr *hdr;
1969 int len = skb->len;
1970
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001971 skb_push(skb, HCI_ACL_HDR_SIZE);
1972 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001973 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001974 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1975 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976}
1977
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001978void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
1979 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
1981 struct hci_dev *hdev = conn->hdev;
1982 struct sk_buff *list;
1983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001984 BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985
1986 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001987 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001988 if (hdev->dev_type == HCI_BREDR)
1989 hci_add_acl_hdr(skb, conn->handle, flags);
1990 else
1991 hci_add_acl_hdr(skb, chan->ll_handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001993 list = skb_shinfo(skb)->frag_list;
1994 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 /* Non fragmented */
1996 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1997
1998 skb_queue_tail(&conn->data_q, skb);
1999 } else {
2000 /* Fragmented */
2001 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2002
2003 skb_shinfo(skb)->frag_list = NULL;
2004
2005 /* Queue all fragments atomically */
2006 spin_lock_bh(&conn->data_q.lock);
2007
2008 __skb_queue_tail(&conn->data_q, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002009 flags &= ~ACL_PB_MASK;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002010 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 do {
2012 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002013
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002015 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002016 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2019
2020 __skb_queue_tail(&conn->data_q, skb);
2021 } while (list);
2022
2023 spin_unlock_bh(&conn->data_q.lock);
2024 }
2025
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002026 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027}
2028EXPORT_SYMBOL(hci_send_acl);
2029
/* Send SCO data */
/* Queue a SCO data packet on @conn: prepend the SCO header (handle +
 * one-byte length), tag the skb and kick the TX tasklet.
 * Consumes @skb. No fragmentation: SCO payloads must already fit.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Build the header in place in front of the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2052
2053/* ---- HCI TX task (outgoing data) ---- */
2054
/* HCI Connection scheduler */
/* Pick the connection of link @type with the fewest in-flight packets
 * that has data queued and is in a sendable state, and compute its fair
 * share of the controller's free buffers.
 * Returns the chosen connection or NULL; *quote receives the packet
 * quota (at least 1 when a connection is returned, 0 otherwise).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		/* Skip links of other types and links with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least outstanding data
		 * (simple fairness between connections) */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* The free-buffer pool depends on link type; LE falls back
		 * to the ACL pool when the controller reports no dedicated
		 * LE buffers (le_mtu == 0) */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split buffers evenly among eligible connections, but
		 * always grant at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2110
/* Handle a transmit timeout on links of @type: every connection of that
 * type that still has unacknowledged packets (c->sent != 0) is assumed
 * stalled and is disconnected with reason 0x13 (Remote User Terminated
 * Connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
2129
2130static inline void hci_sched_acl(struct hci_dev *hdev)
2131{
2132 struct hci_conn *conn;
2133 struct sk_buff *skb;
2134 int quote;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 if (!test_bit(HCI_RAW, &hdev->flags)) {
2139 /* ACL tx timeout must be longer than maximum
2140 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141 if (hdev->acl_cnt <= 0 &&
2142 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002143 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 }
2145
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002146 while (hdev->acl_cnt > 0 &&
2147 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2148 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2149 int count = 1;
2150
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002152
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002153 if (hdev->flow_ctl_mode ==
2154 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2155 /* Calculate count of blocks used by
2156 * this packet
2157 */
2158 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2159 hdev->data_block_len) + 1;
2160
2161 if (count > hdev->acl_cnt)
2162 return;
2163
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002164 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002165
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 hci_send_frame(skb);
2167 hdev->acl_last_tx = jiffies;
2168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002169 hdev->acl_cnt -= count;
2170 quote -= count;
2171
2172 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 }
2174 }
2175}
2176
/* Schedule SCO */
/* Drain queued SCO data while the controller has free SCO buffers.
 * hci_low_sent() picks the connection and quota; conn->sent wraps
 * explicitly at ~0 to avoid overflow.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2197
/* Drain queued eSCO data; identical to hci_sched_sco() except for the
 * link type. eSCO shares the SCO buffer pool (hdev->sco_cnt).
 */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2217
/* Drain queued LE data. Controllers without dedicated LE buffers
 * (le_pkts == 0) share the ACL pool, so the credit count is written
 * back to whichever pool it was taken from. Fires the stalled-link
 * watchdog when LE credits have been exhausted for too long.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
			time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow from the ACL pool when there are no LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Return the remaining credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2251
/* TX tasklet body: run every per-link-type scheduler, then flush any
 * raw (unknown-type) packets. hci_task_lock is taken for reading so
 * protocol (de)registration, which takes it for writing, is excluded
 * while frames are being sent.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2278
/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280
/* ACL data packet */
/* Deliver one inbound ACL data packet to L2CAP.
 * Splits the 16-bit wire handle into connection handle + PB/BC flags,
 * looks up the connection, and passes the skb (ownership included) to
 * the L2CAP recv_acldata hook. The skb is freed here only if no
 * connection or no protocol handler is found.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Wire handle packs flags in the upper bits */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Inbound data may need to wake the link from sniff mode */
		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2320
/* SCO data packet */
/* Deliver one inbound SCO data packet to the SCO protocol layer.
 * Looks up the connection by handle and passes the skb (ownership
 * included) to the recv_scodata hook; frees the skb only when no
 * connection or handler exists.
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2356
/* RX tasklet body: drain hdev->rx_q and dispatch each frame by packet
 * type. Sniffer sockets get a copy first; in HCI_RAW mode everything is
 * consumed by the sockets; during HCI_INIT data packets are discarded
 * because no connections can exist yet.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace owns the device; drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2411
/* Command tasklet body: transmit the next queued HCI command when the
 * controller has a free command credit (cmd_cnt). A clone of the
 * command is kept in hdev->sent_cmd so the completion handler can match
 * it, and cmd_timer arms the command timeout. If cloning fails, the
 * command is requeued and the tasklet rescheduled.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the copy of the previous command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002439
/* Load-time switch for Security Manager Protocol (LE) support;
 * enable_smp is presumably defined near the top of this file — confirm. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");