blob: b095e996e3beaaf49ab585c18157e71513eb127b [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070063static int enable_smp;
64
Linus Torvalds1da177e2005-04-16 15:20:36 -070065/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073/* AMP Manager event callbacks */
74LIST_HEAD(amp_mgr_cb_list);
75DEFINE_RWLOCK(amp_mgr_cb_list_lock);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077/* HCI protocols */
78#define HCI_MAX_PROTO 2
79struct hci_proto *hci_proto[HCI_MAX_PROTO];
80
81/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080082static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197}
198
199static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
200{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200201 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800203 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200204 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 BT_DBG("%s %ld", hdev->name, opt);
207
208 /* Driver initialization */
209
210 /* Special commands */
211 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700212 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100216 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 }
218 skb_queue_purge(&hdev->driver_init);
219
220 /* Mandatory initialization */
221
222 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300223 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
224 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300226 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200228 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200230
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700231
232 /* Set default HCI Flow Control Mode */
233 if (hdev->dev_type == HCI_BREDR)
234 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
235 else
236 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
237
238 /* Read HCI Flow Control Mode */
239 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
240
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200242 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700244 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
245 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
246
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247#if 0
248 /* Host buffer size */
249 {
250 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700251 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700253 cp.acl_max_pkt = cpu_to_le16(0xffff);
254 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200255 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 }
257#endif
258
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 if (hdev->dev_type == HCI_BREDR) {
260 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 /* Read Local Supported Features */
263 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200264
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 /* Read BD Address */
266 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 /* Read Class of Device */
269 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271 /* Read Local Name */
272 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700274 /* Read Voice Setting */
275 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700277 /* Optional initialization */
278 /* Clear Event Filters */
279 flt_type = HCI_FLT_CLEAR_ALL;
280 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200281
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282 /* Connection accept timeout ~20 secs */
283 param = cpu_to_le16(0x7d00);
284 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
285
286 bacpy(&cp.bdaddr, BDADDR_ANY);
287 cp.delete_all = 1;
288 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
289 sizeof(cp), &cp);
290 } else {
291 /* AMP initialization */
292 /* Connection accept timeout ~5 secs */
293 param = cpu_to_le16(0x1f40);
294 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
295
296 /* Read AMP Info */
297 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299}
300
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300301static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
302{
303 BT_DBG("%s", hdev->name);
304
305 /* Read LE buffer size */
306 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307}
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 scan = opt;
312
313 BT_DBG("%s %x", hdev->name, scan);
314
315 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 auth = opt;
322
323 BT_DBG("%s %x", hdev->name, auth);
324
325 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
329static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __u8 encrypt = opt;
332
333 BT_DBG("%s %x", hdev->name, encrypt);
334
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200335 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200336 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200339static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
340{
341 __le16 policy = cpu_to_le16(opt);
342
Marcel Holtmanna418b892008-11-30 12:17:28 +0100343 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200344
345 /* Default link policy */
346 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
347}
348
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900349/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 * Device is held on return. */
351struct hci_dev *hci_dev_get(int index)
352{
353 struct hci_dev *hdev = NULL;
354 struct list_head *p;
355
356 BT_DBG("%d", index);
357
358 if (index < 0)
359 return NULL;
360
361 read_lock(&hci_dev_list_lock);
362 list_for_each(p, &hci_dev_list) {
363 struct hci_dev *d = list_entry(p, struct hci_dev, list);
364 if (d->id == index) {
365 hdev = hci_dev_hold(d);
366 break;
367 }
368 }
369 read_unlock(&hci_dev_list_lock);
370 return hdev;
371}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374/* ---- Inquiry support ---- */
375static void inquiry_cache_flush(struct hci_dev *hdev)
376{
377 struct inquiry_cache *cache = &hdev->inq_cache;
378 struct inquiry_entry *next = cache->list, *e;
379
380 BT_DBG("cache %p", cache);
381
382 cache->list = NULL;
383 while ((e = next)) {
384 next = e->next;
385 kfree(e);
386 }
387}
388
389struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
392 struct inquiry_entry *e;
393
394 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
395
396 for (e = cache->list; e; e = e->next)
397 if (!bacmp(&e->data.bdaddr, bdaddr))
398 break;
399 return e;
400}
401
402void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
403{
404 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200405 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406
407 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
408
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200409 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
410 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200412 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
413 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200415
416 ie->next = cache->list;
417 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 }
419
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200420 memcpy(&ie->data, data, sizeof(*data));
421 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 cache->timestamp = jiffies;
423}
424
425static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
426{
427 struct inquiry_cache *cache = &hdev->inq_cache;
428 struct inquiry_info *info = (struct inquiry_info *) buf;
429 struct inquiry_entry *e;
430 int copied = 0;
431
432 for (e = cache->list; e && copied < num; e = e->next, copied++) {
433 struct inquiry_data *data = &e->data;
434 bacpy(&info->bdaddr, &data->bdaddr);
435 info->pscan_rep_mode = data->pscan_rep_mode;
436 info->pscan_period_mode = data->pscan_period_mode;
437 info->pscan_mode = data->pscan_mode;
438 memcpy(info->dev_class, data->dev_class, 3);
439 info->clock_offset = data->clock_offset;
440 info++;
441 }
442
443 BT_DBG("cache %p, copied %d", cache, copied);
444 return copied;
445}
446
447static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
448{
449 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
450 struct hci_cp_inquiry cp;
451
452 BT_DBG("%s", hdev->name);
453
454 if (test_bit(HCI_INQUIRY, &hdev->flags))
455 return;
456
457 /* Start Inquiry */
458 memcpy(&cp.lap, &ir->lap, 3);
459 cp.length = ir->length;
460 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200461 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
463
464int hci_inquiry(void __user *arg)
465{
466 __u8 __user *ptr = arg;
467 struct hci_inquiry_req ir;
468 struct hci_dev *hdev;
469 int err = 0, do_inquiry = 0, max_rsp;
470 long timeo;
471 __u8 *buf;
472
473 if (copy_from_user(&ir, ptr, sizeof(ir)))
474 return -EFAULT;
475
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200476 hdev = hci_dev_get(ir.dev_id);
477 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478 return -ENODEV;
479
480 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900481 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200482 inquiry_cache_empty(hdev) ||
483 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 inquiry_cache_flush(hdev);
485 do_inquiry = 1;
486 }
487 hci_dev_unlock_bh(hdev);
488
Marcel Holtmann04837f62006-07-03 10:02:33 +0200489 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200490
491 if (do_inquiry) {
492 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
493 if (err < 0)
494 goto done;
495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496
497 /* for unlimited number of responses we will use buffer with 255 entries */
498 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
499
500 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
501 * copy it to the user space.
502 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100503 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200504 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 err = -ENOMEM;
506 goto done;
507 }
508
509 hci_dev_lock_bh(hdev);
510 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
511 hci_dev_unlock_bh(hdev);
512
513 BT_DBG("num_rsp %d", ir.num_rsp);
514
515 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
516 ptr += sizeof(ir);
517 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
518 ir.num_rsp))
519 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900520 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 err = -EFAULT;
522
523 kfree(buf);
524
525done:
526 hci_dev_put(hdev);
527 return err;
528}
529
530/* ---- HCI ioctl helpers ---- */
531
532int hci_dev_open(__u16 dev)
533{
534 struct hci_dev *hdev;
535 int ret = 0;
536
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200537 hdev = hci_dev_get(dev);
538 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 return -ENODEV;
540
541 BT_DBG("%s %p", hdev->name, hdev);
542
543 hci_req_lock(hdev);
544
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200545 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
546 ret = -ERFKILL;
547 goto done;
548 }
549
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 if (test_bit(HCI_UP, &hdev->flags)) {
551 ret = -EALREADY;
552 goto done;
553 }
554
555 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
556 set_bit(HCI_RAW, &hdev->flags);
557
558 if (hdev->open(hdev)) {
559 ret = -EIO;
560 goto done;
561 }
562
563 if (!test_bit(HCI_RAW, &hdev->flags)) {
564 atomic_set(&hdev->cmd_cnt, 1);
565 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200566 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
Marcel Holtmann04837f62006-07-03 10:02:33 +0200568 ret = __hci_request(hdev, hci_init_req, 0,
569 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300572 ret = __hci_request(hdev, hci_le_init_req, 0,
573 msecs_to_jiffies(HCI_INIT_TIMEOUT));
574
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 clear_bit(HCI_INIT, &hdev->flags);
576 }
577
578 if (!ret) {
579 hci_dev_hold(hdev);
580 set_bit(HCI_UP, &hdev->flags);
581 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200582 if (!test_bit(HCI_SETUP, &hdev->flags))
583 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900584 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 /* Init failed, cleanup */
586 tasklet_kill(&hdev->rx_task);
587 tasklet_kill(&hdev->tx_task);
588 tasklet_kill(&hdev->cmd_task);
589
590 skb_queue_purge(&hdev->cmd_q);
591 skb_queue_purge(&hdev->rx_q);
592
593 if (hdev->flush)
594 hdev->flush(hdev);
595
596 if (hdev->sent_cmd) {
597 kfree_skb(hdev->sent_cmd);
598 hdev->sent_cmd = NULL;
599 }
600
601 hdev->close(hdev);
602 hdev->flags = 0;
603 }
604
605done:
606 hci_req_unlock(hdev);
607 hci_dev_put(hdev);
608 return ret;
609}
610
611static int hci_dev_do_close(struct hci_dev *hdev)
612{
613 BT_DBG("%s %p", hdev->name, hdev);
614
615 hci_req_cancel(hdev, ENODEV);
616 hci_req_lock(hdev);
617
618 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300619 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 hci_req_unlock(hdev);
621 return 0;
622 }
623
624 /* Kill RX and TX tasks */
625 tasklet_kill(&hdev->rx_task);
626 tasklet_kill(&hdev->tx_task);
627
628 hci_dev_lock_bh(hdev);
629 inquiry_cache_flush(hdev);
630 hci_conn_hash_flush(hdev);
631 hci_dev_unlock_bh(hdev);
632
633 hci_notify(hdev, HCI_DEV_DOWN);
634
635 if (hdev->flush)
636 hdev->flush(hdev);
637
638 /* Reset device */
639 skb_queue_purge(&hdev->cmd_q);
640 atomic_set(&hdev->cmd_cnt, 1);
641 if (!test_bit(HCI_RAW, &hdev->flags)) {
642 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200643 __hci_request(hdev, hci_reset_req, 0,
644 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645 clear_bit(HCI_INIT, &hdev->flags);
646 }
647
648 /* Kill cmd task */
649 tasklet_kill(&hdev->cmd_task);
650
651 /* Drop queues */
652 skb_queue_purge(&hdev->rx_q);
653 skb_queue_purge(&hdev->cmd_q);
654 skb_queue_purge(&hdev->raw_q);
655
656 /* Drop last sent command */
657 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300658 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 kfree_skb(hdev->sent_cmd);
660 hdev->sent_cmd = NULL;
661 }
662
663 /* After this point our queues are empty
664 * and no tasks are scheduled. */
665 hdev->close(hdev);
666
Johan Hedberg5add6af2010-12-16 10:00:37 +0200667 mgmt_powered(hdev->id, 0);
668
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669 /* Clear flags */
670 hdev->flags = 0;
671
672 hci_req_unlock(hdev);
673
674 hci_dev_put(hdev);
675 return 0;
676}
677
678int hci_dev_close(__u16 dev)
679{
680 struct hci_dev *hdev;
681 int err;
682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200683 hdev = hci_dev_get(dev);
684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 return -ENODEV;
686 err = hci_dev_do_close(hdev);
687 hci_dev_put(hdev);
688 return err;
689}
690
691int hci_dev_reset(__u16 dev)
692{
693 struct hci_dev *hdev;
694 int ret = 0;
695
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200696 hdev = hci_dev_get(dev);
697 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 return -ENODEV;
699
700 hci_req_lock(hdev);
701 tasklet_disable(&hdev->tx_task);
702
703 if (!test_bit(HCI_UP, &hdev->flags))
704 goto done;
705
706 /* Drop queues */
707 skb_queue_purge(&hdev->rx_q);
708 skb_queue_purge(&hdev->cmd_q);
709
710 hci_dev_lock_bh(hdev);
711 inquiry_cache_flush(hdev);
712 hci_conn_hash_flush(hdev);
713 hci_dev_unlock_bh(hdev);
714
715 if (hdev->flush)
716 hdev->flush(hdev);
717
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900718 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300719 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720
721 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200722 ret = __hci_request(hdev, hci_reset_req, 0,
723 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724
725done:
726 tasklet_enable(&hdev->tx_task);
727 hci_req_unlock(hdev);
728 hci_dev_put(hdev);
729 return ret;
730}
731
732int hci_dev_reset_stat(__u16 dev)
733{
734 struct hci_dev *hdev;
735 int ret = 0;
736
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200737 hdev = hci_dev_get(dev);
738 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 return -ENODEV;
740
741 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
742
743 hci_dev_put(hdev);
744
745 return ret;
746}
747
748int hci_dev_cmd(unsigned int cmd, void __user *arg)
749{
750 struct hci_dev *hdev;
751 struct hci_dev_req dr;
752 int err = 0;
753
754 if (copy_from_user(&dr, arg, sizeof(dr)))
755 return -EFAULT;
756
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200757 hdev = hci_dev_get(dr.dev_id);
758 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 return -ENODEV;
760
761 switch (cmd) {
762 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200763 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
764 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 break;
766
767 case HCISETENCRYPT:
768 if (!lmp_encrypt_capable(hdev)) {
769 err = -EOPNOTSUPP;
770 break;
771 }
772
773 if (!test_bit(HCI_AUTH, &hdev->flags)) {
774 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200775 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 if (err)
778 break;
779 }
780
Marcel Holtmann04837f62006-07-03 10:02:33 +0200781 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
782 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 break;
784
785 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200786 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
787 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788 break;
789
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200790 case HCISETLINKPOL:
791 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
792 msecs_to_jiffies(HCI_INIT_TIMEOUT));
793 break;
794
795 case HCISETLINKMODE:
796 hdev->link_mode = ((__u16) dr.dev_opt) &
797 (HCI_LM_MASTER | HCI_LM_ACCEPT);
798 break;
799
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 case HCISETPTYPE:
801 hdev->pkt_type = (__u16) dr.dev_opt;
802 break;
803
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200805 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
806 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 break;
808
809 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200810 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
811 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 break;
813
814 default:
815 err = -EINVAL;
816 break;
817 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200818
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 hci_dev_put(hdev);
820 return err;
821}
822
823int hci_get_dev_list(void __user *arg)
824{
825 struct hci_dev_list_req *dl;
826 struct hci_dev_req *dr;
827 struct list_head *p;
828 int n = 0, size, err;
829 __u16 dev_num;
830
831 if (get_user(dev_num, (__u16 __user *) arg))
832 return -EFAULT;
833
834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
835 return -EINVAL;
836
837 size = sizeof(*dl) + dev_num * sizeof(*dr);
838
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200839 dl = kzalloc(size, GFP_KERNEL);
840 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 return -ENOMEM;
842
843 dr = dl->dev_req;
844
845 read_lock_bh(&hci_dev_list_lock);
846 list_for_each(p, &hci_dev_list) {
847 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200848
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200850
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200851 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200852
853 if (!test_bit(HCI_MGMT, &hdev->flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags);
855
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200858
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 if (++n >= dev_num)
860 break;
861 }
862 read_unlock_bh(&hci_dev_list_lock);
863
864 dl->dev_num = n;
865 size = sizeof(*dl) + n * sizeof(*dr);
866
867 err = copy_to_user(arg, dl, size);
868 kfree(dl);
869
870 return err ? -EFAULT : 0;
871}
872
/* IOCTL helper: copy a snapshot of one adapter's settings and stats to
 * user space.  Reads a struct hci_dev_info from *arg for the dev_id and
 * writes the filled-in structure back to the same buffer.
 * Returns 0 on success, -EFAULT on bad user memory, -ENODEV if no such
 * adapter exists. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference on the device; dropped before returning */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* User-space interest in the adapter cancels a pending auto-off */
	hci_del_off_timer(hdev);

	/* Without a management interface the device is kept pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
913
914/* ---- Interface to HCI drivers ---- */
915
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200916static int hci_rfkill_set_block(void *data, bool blocked)
917{
918 struct hci_dev *hdev = data;
919
920 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
921
922 if (!blocked)
923 return 0;
924
925 hci_dev_do_close(hdev);
926
927 return 0;
928}
929
/* rfkill operations; only blocking is acted upon (hci_rfkill_set_block) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
933
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934/* Alloc HCI device */
935struct hci_dev *hci_alloc_dev(void)
936{
937 struct hci_dev *hdev;
938
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200939 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 if (!hdev)
941 return NULL;
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 skb_queue_head_init(&hdev->driver_init);
944
945 return hdev;
946}
947EXPORT_SYMBOL(hci_alloc_dev);
948
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver init frames still queued on the device */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
958
/* Deferred power-on work: bring the adapter up and, for adapters still
 * marked auto-off, arm the timer that powers them back down if unused. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off adapters are closed again after AUTO_OFF_TIMEOUT
	 * unless hci_del_off_timer() cancels the timer first. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce the new
	 * index to the management interface. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
975
/* Deferred power-off work, queued by the auto-off timer callback */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
984
/* Auto-off timer callback.  Runs in timer (softirq) context, so the
 * actual device close is punted to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
995
/* Cancel a pending auto power-off: clear the flag and delete the timer */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1003
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001004int hci_uuids_clear(struct hci_dev *hdev)
1005{
1006 struct list_head *p, *n;
1007
1008 list_for_each_safe(p, n, &hdev->uuids) {
1009 struct bt_uuid *uuid;
1010
1011 uuid = list_entry(p, struct bt_uuid, list);
1012
1013 list_del(p);
1014 kfree(uuid);
1015 }
1016
1017 return 0;
1018}
1019
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001020int hci_link_keys_clear(struct hci_dev *hdev)
1021{
1022 struct list_head *p, *n;
1023
1024 list_for_each_safe(p, n, &hdev->link_keys) {
1025 struct link_key *key;
1026
1027 key = list_entry(p, struct link_key, list);
1028
1029 list_del(p);
1030 kfree(key);
1031 }
1032
1033 return 0;
1034}
1035
1036struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1037{
1038 struct list_head *p;
1039
1040 list_for_each(p, &hdev->link_keys) {
1041 struct link_key *k;
1042
1043 k = list_entry(p, struct link_key, list);
1044
1045 if (bacmp(bdaddr, &k->bdaddr) == 0)
1046 return k;
1047 }
1048
1049 return NULL;
1050}
1051
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001052struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1053{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001054 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001056 list_for_each(p, &hdev->link_keys) {
1057 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001058 struct key_master_id *id;
1059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 k = list_entry(p, struct link_key, list);
1061
1062 if (k->type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001063 continue;
1064
1065 if (k->dlen != sizeof(*id))
1066 continue;
1067
1068 id = (void *) &k->data;
1069 if (id->ediv == ediv &&
1070 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1071 return k;
1072 }
1073
1074 return NULL;
1075}
1076EXPORT_SYMBOL(hci_find_ltk);
1077
1078struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1079 bdaddr_t *bdaddr, u8 type)
1080{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001081 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001083 list_for_each(p, &hdev->link_keys) {
1084 struct link_key *k;
1085
1086 k = list_entry(p, struct link_key, list);
1087
1088 if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001089 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
/* Store (or update) the link key for a remote device.  @new_key is
 * non-zero when the controller reported this as a newly created key,
 * which triggers a management-interface notification.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the mgmt notification */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	/* Notify before the type fix-up below so mgmt sees the type the
	 * controller actually reported */
	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* 0x06 is the Changed Combination Key type (per the HCI spec);
	 * keep the stored entry classified under its previous type.
	 * NOTE(review): when no previous key existed this leaves the
	 * type as 0xff — presumably intentional, verify against mgmt. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1129
/* Store (or update) an SMP Long Term Key for a remote device.  The LTK
 * shares the link_keys list; its EDIV/Rand identifiers live in a
 * key_master_id structure appended as variable-length data.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Replace an existing LTK for this address when present */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Entry plus trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		/* 0xff marks "no previous key" for the mgmt notification */
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = KEY_TYPE_LTK;
	/* pin_len field doubles as the encryption key size for LTKs */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1167
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001168int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1169{
1170 struct link_key *key;
1171
1172 key = hci_find_link_key(hdev, bdaddr);
1173 if (!key)
1174 return -ENOENT;
1175
1176 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1177
1178 list_del(&key->list);
1179 kfree(key);
1180
1181 return 0;
1182}
1183
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered: allow one new command through,
	 * drop any in-progress reset marker and kick the command task
	 * so the queue can drain again. */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1194
Szymon Janc2763eda2011-03-22 13:12:22 +01001195struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1196 bdaddr_t *bdaddr)
1197{
1198 struct oob_data *data;
1199
1200 list_for_each_entry(data, &hdev->remote_oob_data, list)
1201 if (bacmp(bdaddr, &data->bdaddr) == 0)
1202 return data;
1203
1204 return NULL;
1205}
1206
1207int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1208{
1209 struct oob_data *data;
1210
1211 data = hci_find_remote_oob_data(hdev, bdaddr);
1212 if (!data)
1213 return -ENOENT;
1214
1215 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1216
1217 list_del(&data->list);
1218 kfree(data);
1219
1220 return 0;
1221}
1222
1223int hci_remote_oob_data_clear(struct hci_dev *hdev)
1224{
1225 struct oob_data *data, *n;
1226
1227 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1228 list_del(&data->list);
1229 kfree(data);
1230 }
1231
1232 return 0;
1233}
1234
/* adv_timer callback: flush the advertising entry cache.  Runs in
 * timer (softirq) context. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_adv_entries_clear(hdev);
}
1241
1242int hci_adv_entries_clear(struct hci_dev *hdev)
1243{
1244 struct list_head *p, *n;
1245
1246 write_lock_bh(&hdev->adv_entries_lock);
1247
1248 list_for_each_safe(p, n, &hdev->adv_entries) {
1249 struct adv_entry *entry;
1250
1251 entry = list_entry(p, struct adv_entry, list);
1252
1253 list_del(p);
1254 kfree(entry);
1255 }
1256
1257 write_unlock_bh(&hdev->adv_entries_lock);
1258
1259 return 0;
1260}
1261
1262struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1263{
1264 struct list_head *p;
1265 struct adv_entry *res = NULL;
1266
1267 read_lock_bh(&hdev->adv_entries_lock);
1268
1269 list_for_each(p, &hdev->adv_entries) {
1270 struct adv_entry *entry;
1271
1272 entry = list_entry(p, struct adv_entry, list);
1273
1274 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1275 res = entry;
1276 goto out;
1277 }
1278 }
1279out:
1280 read_unlock_bh(&hdev->adv_entries_lock);
1281 return res;
1282}
1283
1284static inline int is_connectable_adv(u8 evt_type)
1285{
1286 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1287 return 1;
1288
1289 return 0;
1290}
1291
Szymon Janc2763eda2011-03-22 13:12:22 +01001292int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1293 u8 *randomizer)
1294{
1295 struct oob_data *data;
1296
1297 data = hci_find_remote_oob_data(hdev, bdaddr);
1298
1299 if (!data) {
1300 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1301 if (!data)
1302 return -ENOMEM;
1303
1304 bacpy(&data->bdaddr, bdaddr);
1305 list_add(&data->list, &hdev->remote_oob_data);
1306 }
1307
1308 memcpy(data->hash, hash, sizeof(data->hash));
1309 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1310
1311 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1312
1313 return 0;
1314}
1315
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001316int hci_add_adv_entry(struct hci_dev *hdev,
1317 struct hci_ev_le_advertising_info *ev)
1318{
1319 struct adv_entry *entry;
1320
1321 if (!is_connectable_adv(ev->evt_type))
1322 return -EINVAL;
1323
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001324 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001325 /* Only new entries should be added to adv_entries. So, if
1326 * bdaddr was found, don't add it. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 if (entry)
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001328 return 0;
1329
1330 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1331 if (!entry)
1332 return -ENOMEM;
1333
1334 bacpy(&entry->bdaddr, &ev->bdaddr);
1335 entry->bdaddr_type = ev->bdaddr_type;
1336
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001337 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001338 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001339 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001340
1341 return 0;
1342}
1343
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001344static struct crypto_blkcipher *alloc_cypher(void)
1345{
1346 if (enable_smp)
1347 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1348
1349 return ERR_PTR(-ENOTSUPP);
1350}
1351
/* Register HCI device.  Assigns the first free hciN id, initializes all
 * per-device state (queues, tasklets, timers, lists), then hooks the
 * device up to sysfs, rfkill and the crypto/workqueue infrastructure
 * and schedules the initial power-on.
 * Returns the assigned id on success or a negative errno. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply at least these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert keeps the list sorted by id */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Default controller parameters */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Processing contexts for the command, rx and tx paths */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Detects commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* LE advertising cache plus the timer that expires it */
	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* Cipher allocation failure is tolerated; SMP is then unavailable */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
				PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill failure is tolerated too: device works, just no rfkill */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup/auto-off; power on asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1469
/* Unregister HCI device.  Reverses hci_register_dev(): removes the
 * device from the global list, closes it, tears down rfkill/sysfs,
 * stops timers and the workqueue, then flushes all per-device caches
 * and drops the final reference.  Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop partially reassembled frames (kfree_skb accepts NULL) */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Devices still in init/setup were never announced to mgmt */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	/* tfm may hold an ERR_PTR when SMP was unavailable */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop both timers before destroying the workqueue they feed */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1520
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies interested parties; no device state changes here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1528
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies interested parties; no device state changes here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1536
Marcel Holtmann76bca882009-11-18 00:40:39 +01001537/* Receive frame from HCI drivers */
1538int hci_recv_frame(struct sk_buff *skb)
1539{
1540 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1541 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1542 && !test_bit(HCI_INIT, &hdev->flags))) {
1543 kfree_skb(skb);
1544 return -ENXIO;
1545 }
1546
1547 /* Incomming skb */
1548 bt_cb(skb)->incoming = 1;
1549
1550 /* Time stamp */
1551 __net_timestamp(skb);
1552
1553 /* Queue frame for rx task */
1554 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001555 tasklet_schedule(&hdev->rx_task);
1556
Marcel Holtmann76bca882009-11-18 00:40:39 +01001557 return 0;
1558}
1559EXPORT_SYMBOL(hci_recv_frame);
1560
/* Incrementally reassemble one HCI frame of the given packet type from
 * a driver-provided byte stream.  State is kept in
 * hdev->reassembly[index]; completed frames are handed to
 * hci_recv_frame().
 * Returns the number of unconsumed input bytes (>= 0), -EILSEQ for a
 * bad type/index, or -ENOMEM on allocation failure or when the
 * header-announced payload exceeds the preallocated skb. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: size the skb for the largest
		 * possible packet of this type and expect the header
		 * first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed
		 * before the next parsing step */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the
		 * payload length from it and expect that many bytes */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1669
Marcel Holtmannef222012007-07-11 06:42:04 +02001670int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1671{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301672 int rem = 0;
1673
Marcel Holtmannef222012007-07-11 06:42:04 +02001674 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1675 return -EILSEQ;
1676
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001677 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001678 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301679 if (rem < 0)
1680 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001681
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301682 data += (count - rem);
1683 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001684 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001685
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301686 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001687}
1688EXPORT_SYMBOL(hci_recv_fragment);
1689
Suraj Sumangala99811512010-07-14 13:02:19 +05301690#define STREAM_REASSEMBLY 0
1691
1692int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1693{
1694 int type;
1695 int rem = 0;
1696
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001697 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301698 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1699
1700 if (!skb) {
1701 struct { char type; } *pkt;
1702
1703 /* Start of the frame */
1704 pkt = data;
1705 type = pkt->type;
1706
1707 data++;
1708 count--;
1709 } else
1710 type = bt_cb(skb)->pkt_type;
1711
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001712 rem = hci_reassembly(hdev, type, data, count,
1713 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301714 if (rem < 0)
1715 return rem;
1716
1717 data += (count - rem);
1718 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001719 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301720
1721 return rem;
1722}
1723EXPORT_SYMBOL(hci_recv_stream_fragment);
1724
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725/* ---- Interface to upper protocols ---- */
1726
1727/* Register/Unregister protocols.
1728 * hci_task_lock is used to ensure that no tasks are running. */
1729int hci_register_proto(struct hci_proto *hp)
1730{
1731 int err = 0;
1732
1733 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1734
1735 if (hp->id >= HCI_MAX_PROTO)
1736 return -EINVAL;
1737
1738 write_lock_bh(&hci_task_lock);
1739
1740 if (!hci_proto[hp->id])
1741 hci_proto[hp->id] = hp;
1742 else
1743 err = -EEXIST;
1744
1745 write_unlock_bh(&hci_task_lock);
1746
1747 return err;
1748}
1749EXPORT_SYMBOL(hci_register_proto);
1750
1751int hci_unregister_proto(struct hci_proto *hp)
1752{
1753 int err = 0;
1754
1755 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1756
1757 if (hp->id >= HCI_MAX_PROTO)
1758 return -EINVAL;
1759
1760 write_lock_bh(&hci_task_lock);
1761
1762 if (hci_proto[hp->id])
1763 hci_proto[hp->id] = NULL;
1764 else
1765 err = -ENOENT;
1766
1767 write_unlock_bh(&hci_task_lock);
1768
1769 return err;
1770}
1771EXPORT_SYMBOL(hci_unregister_proto);
1772
1773int hci_register_cb(struct hci_cb *cb)
1774{
1775 BT_DBG("%p name %s", cb, cb->name);
1776
1777 write_lock_bh(&hci_cb_list_lock);
1778 list_add(&cb->list, &hci_cb_list);
1779 write_unlock_bh(&hci_cb_list_lock);
1780
1781 return 0;
1782}
1783EXPORT_SYMBOL(hci_register_cb);
1784
1785int hci_unregister_cb(struct hci_cb *cb)
1786{
1787 BT_DBG("%p name %s", cb, cb->name);
1788
1789 write_lock_bh(&hci_cb_list_lock);
1790 list_del(&cb->list);
1791 write_unlock_bh(&hci_cb_list_lock);
1792
1793 return 0;
1794}
1795EXPORT_SYMBOL(hci_unregister_cb);
1796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797int hci_register_amp(struct amp_mgr_cb *cb)
1798{
1799 BT_DBG("%p", cb);
1800
1801 write_lock_bh(&amp_mgr_cb_list_lock);
1802 list_add(&cb->list, &amp_mgr_cb_list);
1803 write_unlock_bh(&amp_mgr_cb_list_lock);
1804
1805 return 0;
1806}
1807EXPORT_SYMBOL(hci_register_amp);
1808
1809int hci_unregister_amp(struct amp_mgr_cb *cb)
1810{
1811 BT_DBG("%p", cb);
1812
1813 write_lock_bh(&amp_mgr_cb_list_lock);
1814 list_del(&cb->list);
1815 write_unlock_bh(&amp_mgr_cb_list_lock);
1816
1817 return 0;
1818}
1819EXPORT_SYMBOL(hci_unregister_amp);
1820
1821void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
1822 struct sk_buff *skb)
1823{
1824 struct amp_mgr_cb *cb;
1825
1826 BT_DBG("opcode 0x%x", opcode);
1827
1828 read_lock_bh(&amp_mgr_cb_list_lock);
1829 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1830 if (cb->amp_cmd_complete_event)
1831 cb->amp_cmd_complete_event(hdev, opcode, skb);
1832 }
1833 read_unlock_bh(&amp_mgr_cb_list_lock);
1834}
1835
1836void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
1837{
1838 struct amp_mgr_cb *cb;
1839
1840 BT_DBG("opcode 0x%x, status %d", opcode, status);
1841
1842 read_lock_bh(&amp_mgr_cb_list_lock);
1843 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1844 if (cb->amp_cmd_status_event)
1845 cb->amp_cmd_status_event(hdev, opcode, status);
1846 }
1847 read_unlock_bh(&amp_mgr_cb_list_lock);
1848}
1849
1850void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
1851 struct sk_buff *skb)
1852{
1853 struct amp_mgr_cb *cb;
1854
1855 BT_DBG("ev_code 0x%x", ev_code);
1856
1857 read_lock_bh(&amp_mgr_cb_list_lock);
1858 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1859 if (cb->amp_event)
1860 cb->amp_event(hdev, ev_code, skb);
1861 }
1862 read_unlock_bh(&amp_mgr_cb_list_lock);
1863}
1864
/* Hand one outgoing frame to the driver's send callback.  Consumes the
 * skb.  Returns the driver's result, or -ENODEV when the skb carries
 * no device pointer. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Mirror the frame to monitoring sockets when anyone listens */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1889
/* Send HCI command.  Builds a command packet (header + optional
 * parameters), queues it on cmd_q and kicks the command tasklet, which
 * serializes transmission to the controller.
 * Returns 0 on success, -ENOMEM when the skb cannot be allocated. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Wire format: little-endian opcode + parameter length byte */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command for the init state
	 * machine in the event handler */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001928void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929{
1930 struct hci_command_hdr *hdr;
1931
1932 if (!hdev->sent_cmd)
1933 return NULL;
1934
1935 hdr = (void *) hdev->sent_cmd->data;
1936
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001937 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938 return NULL;
1939
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001940 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
1942 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1943}
1944
1945/* Send ACL data */
1946static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1947{
1948 struct hci_acl_hdr *hdr;
1949 int len = skb->len;
1950
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001951 skb_push(skb, HCI_ACL_HDR_SIZE);
1952 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001953 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001954 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1955 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956}
1957
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
1959 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960{
1961 struct hci_dev *hdev = conn->hdev;
1962 struct sk_buff *list;
1963
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001964 BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001967 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 if (hdev->dev_type == HCI_BREDR)
1969 hci_add_acl_hdr(skb, conn->handle, flags);
1970 else
1971 hci_add_acl_hdr(skb, chan->ll_handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001973 list = skb_shinfo(skb)->frag_list;
1974 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 /* Non fragmented */
1976 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1977
1978 skb_queue_tail(&conn->data_q, skb);
1979 } else {
1980 /* Fragmented */
1981 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1982
1983 skb_shinfo(skb)->frag_list = NULL;
1984
1985 /* Queue all fragments atomically */
1986 spin_lock_bh(&conn->data_q.lock);
1987
1988 __skb_queue_tail(&conn->data_q, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001989 flags &= ~ACL_PB_MASK;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001990 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991 do {
1992 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001993
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001995 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001996 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
1998 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1999
2000 __skb_queue_tail(&conn->data_q, skb);
2001 } while (list);
2002
2003 spin_unlock_bh(&conn->data_q.lock);
2004 }
2005
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002006 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007}
2008EXPORT_SYMBOL(hci_send_acl);
2009
2010/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002011void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012{
2013 struct hci_dev *hdev = conn->hdev;
2014 struct hci_sco_hdr hdr;
2015
2016 BT_DBG("%s len %d", hdev->name, skb->len);
2017
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002018 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 hdr.dlen = skb->len;
2020
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002021 skb_push(skb, HCI_SCO_HDR_SIZE);
2022 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002023 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
2025 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002026 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002029 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030}
2031EXPORT_SYMBOL(hci_send_sco);
2032
2033/* ---- HCI TX task (outgoing data) ---- */
2034
2035/* HCI Connection scheduler */
2036static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2037{
2038 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02002039 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 int num = 0, min = ~0;
2041 struct list_head *p;
2042
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002043 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 * added and removed with TX task disabled. */
2045 list_for_each(p, &h->list) {
2046 struct hci_conn *c;
2047 c = list_entry(p, struct hci_conn, list);
2048
Marcel Holtmann769be972008-07-14 20:13:49 +02002049 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002051
2052 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2053 continue;
2054
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 num++;
2056
2057 if (c->sent < min) {
2058 min = c->sent;
2059 conn = c;
2060 }
2061 }
2062
2063 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002064 int cnt, q;
2065
2066 switch (conn->type) {
2067 case ACL_LINK:
2068 cnt = hdev->acl_cnt;
2069 break;
2070 case SCO_LINK:
2071 case ESCO_LINK:
2072 cnt = hdev->sco_cnt;
2073 break;
2074 case LE_LINK:
2075 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2076 break;
2077 default:
2078 cnt = 0;
2079 BT_ERR("Unknown link type");
2080 }
2081
2082 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 *quote = q ? q : 1;
2084 } else
2085 *quote = 0;
2086
2087 BT_DBG("conn %p quote %d", conn, *quote);
2088 return conn;
2089}
2090
Ville Tervobae1f5d2011-02-10 22:38:53 -03002091static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092{
2093 struct hci_conn_hash *h = &hdev->conn_hash;
2094 struct list_head *p;
2095 struct hci_conn *c;
2096
Ville Tervobae1f5d2011-02-10 22:38:53 -03002097 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 /* Kill stalled connections */
2100 list_for_each(p, &h->list) {
2101 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002102 if (c->type == type && c->sent) {
2103 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104 hdev->name, batostr(&c->dst));
2105 hci_acl_disconn(c, 0x13);
2106 }
2107 }
2108}
2109
2110static inline void hci_sched_acl(struct hci_dev *hdev)
2111{
2112 struct hci_conn *conn;
2113 struct sk_buff *skb;
2114 int quote;
2115
2116 BT_DBG("%s", hdev->name);
2117
2118 if (!test_bit(HCI_RAW, &hdev->flags)) {
2119 /* ACL tx timeout must be longer than maximum
2120 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002121 if (hdev->acl_cnt <= 0 &&
2122 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002123 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 }
2125
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002126 while (hdev->acl_cnt > 0 &&
2127 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2128 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2129 int count = 1;
2130
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002132
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002133 if (hdev->flow_ctl_mode ==
2134 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2135 /* Calculate count of blocks used by
2136 * this packet
2137 */
2138 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2139 hdev->data_block_len) + 1;
2140
2141 if (count > hdev->acl_cnt)
2142 return;
2143
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002144 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002145
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 hci_send_frame(skb);
2147 hdev->acl_last_tx = jiffies;
2148
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002149 hdev->acl_cnt -= count;
2150 quote -= count;
2151
2152 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 }
2154 }
2155}
2156
2157/* Schedule SCO */
2158static inline void hci_sched_sco(struct hci_dev *hdev)
2159{
2160 struct hci_conn *conn;
2161 struct sk_buff *skb;
2162 int quote;
2163
2164 BT_DBG("%s", hdev->name);
2165
2166 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2167 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2168 BT_DBG("skb %p len %d", skb, skb->len);
2169 hci_send_frame(skb);
2170
2171 conn->sent++;
2172 if (conn->sent == ~0)
2173 conn->sent = 0;
2174 }
2175 }
2176}
2177
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002178static inline void hci_sched_esco(struct hci_dev *hdev)
2179{
2180 struct hci_conn *conn;
2181 struct sk_buff *skb;
2182 int quote;
2183
2184 BT_DBG("%s", hdev->name);
2185
2186 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2187 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2188 BT_DBG("skb %p len %d", skb, skb->len);
2189 hci_send_frame(skb);
2190
2191 conn->sent++;
2192 if (conn->sent == ~0)
2193 conn->sent = 0;
2194 }
2195 }
2196}
2197
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002198static inline void hci_sched_le(struct hci_dev *hdev)
2199{
2200 struct hci_conn *conn;
2201 struct sk_buff *skb;
2202 int quote, cnt;
2203
2204 BT_DBG("%s", hdev->name);
2205
2206 if (!test_bit(HCI_RAW, &hdev->flags)) {
2207 /* LE tx timeout must be longer than maximum
2208 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03002209 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002210 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002211 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002212 }
2213
2214 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2215 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2216 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2217 BT_DBG("skb %p len %d", skb, skb->len);
2218
2219 hci_send_frame(skb);
2220 hdev->le_last_tx = jiffies;
2221
2222 cnt--;
2223 conn->sent++;
2224 }
2225 }
2226 if (hdev->le_pkts)
2227 hdev->le_cnt = cnt;
2228 else
2229 hdev->acl_cnt = cnt;
2230}
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232static void hci_tx_task(unsigned long arg)
2233{
2234 struct hci_dev *hdev = (struct hci_dev *) arg;
2235 struct sk_buff *skb;
2236
2237 read_lock(&hci_task_lock);
2238
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002239 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2240 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
2242 /* Schedule queues and send stuff to HCI driver */
2243
2244 hci_sched_acl(hdev);
2245
2246 hci_sched_sco(hdev);
2247
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002248 hci_sched_esco(hdev);
2249
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002250 hci_sched_le(hdev);
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252 /* Send next queued raw (unknown type) packet */
2253 while ((skb = skb_dequeue(&hdev->raw_q)))
2254 hci_send_frame(skb);
2255
2256 read_unlock(&hci_task_lock);
2257}
2258
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002259/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
2261/* ACL data packet */
2262static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2263{
2264 struct hci_acl_hdr *hdr = (void *) skb->data;
2265 struct hci_conn *conn;
2266 __u16 handle, flags;
2267
2268 skb_pull(skb, HCI_ACL_HDR_SIZE);
2269
2270 handle = __le16_to_cpu(hdr->handle);
2271 flags = hci_flags(handle);
2272 handle = hci_handle(handle);
2273
2274 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2275
2276 hdev->stat.acl_rx++;
2277
2278 hci_dev_lock(hdev);
2279 conn = hci_conn_hash_lookup_handle(hdev, handle);
2280 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 if (conn) {
2283 register struct hci_proto *hp;
2284
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002285 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002286
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002288 hp = hci_proto[HCI_PROTO_L2CAP];
2289 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 hp->recv_acldata(conn, skb, flags);
2291 return;
2292 }
2293 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002294 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 hdev->name, handle);
2296 }
2297
2298 kfree_skb(skb);
2299}
2300
2301/* SCO data packet */
2302static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2303{
2304 struct hci_sco_hdr *hdr = (void *) skb->data;
2305 struct hci_conn *conn;
2306 __u16 handle;
2307
2308 skb_pull(skb, HCI_SCO_HDR_SIZE);
2309
2310 handle = __le16_to_cpu(hdr->handle);
2311
2312 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2313
2314 hdev->stat.sco_rx++;
2315
2316 hci_dev_lock(hdev);
2317 conn = hci_conn_hash_lookup_handle(hdev, handle);
2318 hci_dev_unlock(hdev);
2319
2320 if (conn) {
2321 register struct hci_proto *hp;
2322
2323 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002324 hp = hci_proto[HCI_PROTO_SCO];
2325 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002326 hp->recv_scodata(conn, skb);
2327 return;
2328 }
2329 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002330 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 hdev->name, handle);
2332 }
2333
2334 kfree_skb(skb);
2335}
2336
Marcel Holtmann65164552005-10-28 19:20:48 +02002337static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338{
2339 struct hci_dev *hdev = (struct hci_dev *) arg;
2340 struct sk_buff *skb;
2341
2342 BT_DBG("%s", hdev->name);
2343
2344 read_lock(&hci_task_lock);
2345
2346 while ((skb = skb_dequeue(&hdev->rx_q))) {
2347 if (atomic_read(&hdev->promisc)) {
2348 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002349 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 }
2351
2352 if (test_bit(HCI_RAW, &hdev->flags)) {
2353 kfree_skb(skb);
2354 continue;
2355 }
2356
2357 if (test_bit(HCI_INIT, &hdev->flags)) {
2358 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002359 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360 case HCI_ACLDATA_PKT:
2361 case HCI_SCODATA_PKT:
2362 kfree_skb(skb);
2363 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002365 }
2366
2367 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002368 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369 case HCI_EVENT_PKT:
2370 hci_event_packet(hdev, skb);
2371 break;
2372
2373 case HCI_ACLDATA_PKT:
2374 BT_DBG("%s ACL data packet", hdev->name);
2375 hci_acldata_packet(hdev, skb);
2376 break;
2377
2378 case HCI_SCODATA_PKT:
2379 BT_DBG("%s SCO data packet", hdev->name);
2380 hci_scodata_packet(hdev, skb);
2381 break;
2382
2383 default:
2384 kfree_skb(skb);
2385 break;
2386 }
2387 }
2388
2389 read_unlock(&hci_task_lock);
2390}
2391
2392static void hci_cmd_task(unsigned long arg)
2393{
2394 struct hci_dev *hdev = (struct hci_dev *) arg;
2395 struct sk_buff *skb;
2396
2397 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2398
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002400 if (atomic_read(&hdev->cmd_cnt)) {
2401 skb = skb_dequeue(&hdev->cmd_q);
2402 if (!skb)
2403 return;
2404
Wei Yongjun7585b972009-02-25 18:29:52 +08002405 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002407 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2408 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 atomic_dec(&hdev->cmd_cnt);
2410 hci_send_frame(skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002411 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002412 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 } else {
2414 skb_queue_head(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002415 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 }
2417 }
2418}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002419
2420module_param(enable_smp, bool, 0644);
2421MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");