blob: 65af37f5605f6d624c6ec748e1c9cd6436987cd8 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Timeout (ms) before an unused, management-opened adapter is powered off */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet handlers, defined later in this file */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the rx/tx/cmd task paths */
static DEFINE_RWLOCK(hci_task_lock);

/* Module toggle: Security Manager Protocol support (enabled by default) */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols (L2CAP, SCO) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197}
198
199static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
200{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200201 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800203 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200204 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 BT_DBG("%s %ld", hdev->name, opt);
207
208 /* Driver initialization */
209
210 /* Special commands */
211 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700212 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100216 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 }
218 skb_queue_purge(&hdev->driver_init);
219
220 /* Mandatory initialization */
221
222 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300223 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
224 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300226 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200228 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200230
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700231
232 /* Set default HCI Flow Control Mode */
233 if (hdev->dev_type == HCI_BREDR)
234 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
235 else
236 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
237
238 /* Read HCI Flow Control Mode */
239 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
240
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200242 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700244 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
245 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
246
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247#if 0
248 /* Host buffer size */
249 {
250 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700251 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700253 cp.acl_max_pkt = cpu_to_le16(0xffff);
254 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200255 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256 }
257#endif
258
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700259 if (hdev->dev_type == HCI_BREDR) {
260 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200261
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700262 /* Read Local Supported Features */
263 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200264
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700265 /* Read BD Address */
266 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700268 /* Read Class of Device */
269 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700271 /* Read Local Name */
272 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700273
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700274 /* Read Voice Setting */
275 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700277 /* Optional initialization */
278 /* Clear Event Filters */
279 flt_type = HCI_FLT_CLEAR_ALL;
280 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200281
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700282 /* Connection accept timeout ~20 secs */
283 param = cpu_to_le16(0x7d00);
284 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
285
286 bacpy(&cp.bdaddr, BDADDR_ANY);
287 cp.delete_all = 1;
288 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
289 sizeof(cp), &cp);
290 } else {
291 /* AMP initialization */
292 /* Connection accept timeout ~5 secs */
293 param = cpu_to_le16(0x1f40);
294 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
295
296 /* Read AMP Info */
297 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
298 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299}
300
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300301static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
302{
303 BT_DBG("%s", hdev->name);
304
305 /* Read LE buffer size */
306 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
307}
308
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 scan = opt;
312
313 BT_DBG("%s %x", hdev->name, scan);
314
315 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 auth = opt;
322
323 BT_DBG("%s %x", hdev->name, auth);
324
325 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
329static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __u8 encrypt = opt;
332
333 BT_DBG("%s %x", hdev->name, encrypt);
334
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200335 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200336 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337}
338
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200339static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
340{
341 __le16 policy = cpu_to_le16(opt);
342
Marcel Holtmanna418b892008-11-30 12:17:28 +0100343 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200344
345 /* Default link policy */
346 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
347}
348
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900349/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350 * Device is held on return. */
351struct hci_dev *hci_dev_get(int index)
352{
353 struct hci_dev *hdev = NULL;
354 struct list_head *p;
355
356 BT_DBG("%d", index);
357
358 if (index < 0)
359 return NULL;
360
361 read_lock(&hci_dev_list_lock);
362 list_for_each(p, &hci_dev_list) {
363 struct hci_dev *d = list_entry(p, struct hci_dev, list);
364 if (d->id == index) {
365 hdev = hci_dev_hold(d);
366 break;
367 }
368 }
369 read_unlock(&hci_dev_list_lock);
370 return hdev;
371}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700372EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374/* ---- Inquiry support ---- */
375static void inquiry_cache_flush(struct hci_dev *hdev)
376{
377 struct inquiry_cache *cache = &hdev->inq_cache;
378 struct inquiry_entry *next = cache->list, *e;
379
380 BT_DBG("cache %p", cache);
381
382 cache->list = NULL;
383 while ((e = next)) {
384 next = e->next;
385 kfree(e);
386 }
387}
388
389struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
392 struct inquiry_entry *e;
393
394 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
395
396 for (e = cache->list; e; e = e->next)
397 if (!bacmp(&e->data.bdaddr, bdaddr))
398 break;
399 return e;
400}
401
402void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
403{
404 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200405 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406
407 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
408
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200409 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
410 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200412 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
413 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200415
416 ie->next = cache->list;
417 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 }
419
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200420 memcpy(&ie->data, data, sizeof(*data));
421 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 cache->timestamp = jiffies;
423}
424
425static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
426{
427 struct inquiry_cache *cache = &hdev->inq_cache;
428 struct inquiry_info *info = (struct inquiry_info *) buf;
429 struct inquiry_entry *e;
430 int copied = 0;
431
432 for (e = cache->list; e && copied < num; e = e->next, copied++) {
433 struct inquiry_data *data = &e->data;
434 bacpy(&info->bdaddr, &data->bdaddr);
435 info->pscan_rep_mode = data->pscan_rep_mode;
436 info->pscan_period_mode = data->pscan_period_mode;
437 info->pscan_mode = data->pscan_mode;
438 memcpy(info->dev_class, data->dev_class, 3);
439 info->clock_offset = data->clock_offset;
440 info++;
441 }
442
443 BT_DBG("cache %p, copied %d", cache, copied);
444 return copied;
445}
446
447static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
448{
449 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
450 struct hci_cp_inquiry cp;
451
452 BT_DBG("%s", hdev->name);
453
454 if (test_bit(HCI_INQUIRY, &hdev->flags))
455 return;
456
457 /* Start Inquiry */
458 memcpy(&cp.lap, &ir->lap, 3);
459 cp.length = ir->length;
460 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200461 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462}
463
464int hci_inquiry(void __user *arg)
465{
466 __u8 __user *ptr = arg;
467 struct hci_inquiry_req ir;
468 struct hci_dev *hdev;
469 int err = 0, do_inquiry = 0, max_rsp;
470 long timeo;
471 __u8 *buf;
472
473 if (copy_from_user(&ir, ptr, sizeof(ir)))
474 return -EFAULT;
475
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200476 hdev = hci_dev_get(ir.dev_id);
477 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478 return -ENODEV;
479
480 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900481 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200482 inquiry_cache_empty(hdev) ||
483 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 inquiry_cache_flush(hdev);
485 do_inquiry = 1;
486 }
487 hci_dev_unlock_bh(hdev);
488
Marcel Holtmann04837f62006-07-03 10:02:33 +0200489 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200490
491 if (do_inquiry) {
492 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
493 if (err < 0)
494 goto done;
495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700496
497 /* for unlimited number of responses we will use buffer with 255 entries */
498 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
499
500 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
501 * copy it to the user space.
502 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100503 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200504 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 err = -ENOMEM;
506 goto done;
507 }
508
509 hci_dev_lock_bh(hdev);
510 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
511 hci_dev_unlock_bh(hdev);
512
513 BT_DBG("num_rsp %d", ir.num_rsp);
514
515 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
516 ptr += sizeof(ir);
517 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
518 ir.num_rsp))
519 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900520 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 err = -EFAULT;
522
523 kfree(buf);
524
525done:
526 hci_dev_put(hdev);
527 return err;
528}
529
530/* ---- HCI ioctl helpers ---- */
531
532int hci_dev_open(__u16 dev)
533{
534 struct hci_dev *hdev;
535 int ret = 0;
536
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200537 hdev = hci_dev_get(dev);
538 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 return -ENODEV;
540
541 BT_DBG("%s %p", hdev->name, hdev);
542
543 hci_req_lock(hdev);
544
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200545 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
546 ret = -ERFKILL;
547 goto done;
548 }
549
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 if (test_bit(HCI_UP, &hdev->flags)) {
551 ret = -EALREADY;
552 goto done;
553 }
554
555 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
556 set_bit(HCI_RAW, &hdev->flags);
557
558 if (hdev->open(hdev)) {
559 ret = -EIO;
560 goto done;
561 }
562
563 if (!test_bit(HCI_RAW, &hdev->flags)) {
564 atomic_set(&hdev->cmd_cnt, 1);
565 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200566 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
Marcel Holtmann04837f62006-07-03 10:02:33 +0200568 ret = __hci_request(hdev, hci_init_req, 0,
569 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300572 ret = __hci_request(hdev, hci_le_init_req, 0,
573 msecs_to_jiffies(HCI_INIT_TIMEOUT));
574
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 clear_bit(HCI_INIT, &hdev->flags);
576 }
577
578 if (!ret) {
579 hci_dev_hold(hdev);
580 set_bit(HCI_UP, &hdev->flags);
581 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200582 if (!test_bit(HCI_SETUP, &hdev->flags))
583 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900584 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 /* Init failed, cleanup */
586 tasklet_kill(&hdev->rx_task);
587 tasklet_kill(&hdev->tx_task);
588 tasklet_kill(&hdev->cmd_task);
589
590 skb_queue_purge(&hdev->cmd_q);
591 skb_queue_purge(&hdev->rx_q);
592
593 if (hdev->flush)
594 hdev->flush(hdev);
595
596 if (hdev->sent_cmd) {
597 kfree_skb(hdev->sent_cmd);
598 hdev->sent_cmd = NULL;
599 }
600
601 hdev->close(hdev);
602 hdev->flags = 0;
603 }
604
605done:
606 hci_req_unlock(hdev);
607 hci_dev_put(hdev);
608 return ret;
609}
610
611static int hci_dev_do_close(struct hci_dev *hdev)
612{
613 BT_DBG("%s %p", hdev->name, hdev);
614
615 hci_req_cancel(hdev, ENODEV);
616 hci_req_lock(hdev);
617
618 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300619 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 hci_req_unlock(hdev);
621 return 0;
622 }
623
624 /* Kill RX and TX tasks */
625 tasklet_kill(&hdev->rx_task);
626 tasklet_kill(&hdev->tx_task);
627
628 hci_dev_lock_bh(hdev);
629 inquiry_cache_flush(hdev);
630 hci_conn_hash_flush(hdev);
631 hci_dev_unlock_bh(hdev);
632
633 hci_notify(hdev, HCI_DEV_DOWN);
634
635 if (hdev->flush)
636 hdev->flush(hdev);
637
638 /* Reset device */
639 skb_queue_purge(&hdev->cmd_q);
640 atomic_set(&hdev->cmd_cnt, 1);
641 if (!test_bit(HCI_RAW, &hdev->flags)) {
642 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200643 __hci_request(hdev, hci_reset_req, 0,
644 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645 clear_bit(HCI_INIT, &hdev->flags);
646 }
647
648 /* Kill cmd task */
649 tasklet_kill(&hdev->cmd_task);
650
651 /* Drop queues */
652 skb_queue_purge(&hdev->rx_q);
653 skb_queue_purge(&hdev->cmd_q);
654 skb_queue_purge(&hdev->raw_q);
655
656 /* Drop last sent command */
657 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300658 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 kfree_skb(hdev->sent_cmd);
660 hdev->sent_cmd = NULL;
661 }
662
663 /* After this point our queues are empty
664 * and no tasks are scheduled. */
665 hdev->close(hdev);
666
Johan Hedberg5add6af2010-12-16 10:00:37 +0200667 mgmt_powered(hdev->id, 0);
668
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669 /* Clear flags */
670 hdev->flags = 0;
671
672 hci_req_unlock(hdev);
673
674 hci_dev_put(hdev);
675 return 0;
676}
677
678int hci_dev_close(__u16 dev)
679{
680 struct hci_dev *hdev;
681 int err;
682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200683 hdev = hci_dev_get(dev);
684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 return -ENODEV;
686 err = hci_dev_do_close(hdev);
687 hci_dev_put(hdev);
688 return err;
689}
690
691int hci_dev_reset(__u16 dev)
692{
693 struct hci_dev *hdev;
694 int ret = 0;
695
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200696 hdev = hci_dev_get(dev);
697 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698 return -ENODEV;
699
700 hci_req_lock(hdev);
701 tasklet_disable(&hdev->tx_task);
702
703 if (!test_bit(HCI_UP, &hdev->flags))
704 goto done;
705
706 /* Drop queues */
707 skb_queue_purge(&hdev->rx_q);
708 skb_queue_purge(&hdev->cmd_q);
709
710 hci_dev_lock_bh(hdev);
711 inquiry_cache_flush(hdev);
712 hci_conn_hash_flush(hdev);
713 hci_dev_unlock_bh(hdev);
714
715 if (hdev->flush)
716 hdev->flush(hdev);
717
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900718 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300719 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700720
721 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200722 ret = __hci_request(hdev, hci_reset_req, 0,
723 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724
725done:
726 tasklet_enable(&hdev->tx_task);
727 hci_req_unlock(hdev);
728 hci_dev_put(hdev);
729 return ret;
730}
731
732int hci_dev_reset_stat(__u16 dev)
733{
734 struct hci_dev *hdev;
735 int ret = 0;
736
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200737 hdev = hci_dev_get(dev);
738 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 return -ENODEV;
740
741 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
742
743 hci_dev_put(hdev);
744
745 return ret;
746}
747
748int hci_dev_cmd(unsigned int cmd, void __user *arg)
749{
750 struct hci_dev *hdev;
751 struct hci_dev_req dr;
752 int err = 0;
753
754 if (copy_from_user(&dr, arg, sizeof(dr)))
755 return -EFAULT;
756
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200757 hdev = hci_dev_get(dr.dev_id);
758 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700759 return -ENODEV;
760
761 switch (cmd) {
762 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200763 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
764 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 break;
766
767 case HCISETENCRYPT:
768 if (!lmp_encrypt_capable(hdev)) {
769 err = -EOPNOTSUPP;
770 break;
771 }
772
773 if (!test_bit(HCI_AUTH, &hdev->flags)) {
774 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200775 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
776 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 if (err)
778 break;
779 }
780
Marcel Holtmann04837f62006-07-03 10:02:33 +0200781 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
782 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 break;
784
785 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200786 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
787 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788 break;
789
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200790 case HCISETLINKPOL:
791 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
792 msecs_to_jiffies(HCI_INIT_TIMEOUT));
793 break;
794
795 case HCISETLINKMODE:
796 hdev->link_mode = ((__u16) dr.dev_opt) &
797 (HCI_LM_MASTER | HCI_LM_ACCEPT);
798 break;
799
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 case HCISETPTYPE:
801 hdev->pkt_type = (__u16) dr.dev_opt;
802 break;
803
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200805 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
806 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 break;
808
809 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200810 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
811 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 break;
813
814 default:
815 err = -EINVAL;
816 break;
817 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200818
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 hci_dev_put(hdev);
820 return err;
821}
822
823int hci_get_dev_list(void __user *arg)
824{
825 struct hci_dev_list_req *dl;
826 struct hci_dev_req *dr;
827 struct list_head *p;
828 int n = 0, size, err;
829 __u16 dev_num;
830
831 if (get_user(dev_num, (__u16 __user *) arg))
832 return -EFAULT;
833
834 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
835 return -EINVAL;
836
837 size = sizeof(*dl) + dev_num * sizeof(*dr);
838
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200839 dl = kzalloc(size, GFP_KERNEL);
840 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 return -ENOMEM;
842
843 dr = dl->dev_req;
844
845 read_lock_bh(&hci_dev_list_lock);
846 list_for_each(p, &hci_dev_list) {
847 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200848
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200850
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200851 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200852
853 if (!test_bit(HCI_MGMT, &hdev->flags))
854 set_bit(HCI_PAIRABLE, &hdev->flags);
855
Linus Torvalds1da177e2005-04-16 15:20:36 -0700856 (dr + n)->dev_id = hdev->id;
857 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200858
Linus Torvalds1da177e2005-04-16 15:20:36 -0700859 if (++n >= dev_num)
860 break;
861 }
862 read_unlock_bh(&hci_dev_list_lock);
863
864 dl->dev_num = n;
865 size = sizeof(*dl) + n * sizeof(*dr);
866
867 err = copy_to_user(arg, dl, size);
868 kfree(dl);
869
870 return err ? -EFAULT : 0;
871}
872
/* HCIGETDEVINFO ioctl helper: snapshot one controller's parameters into
 * a struct hci_dev_info and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on a failed user copy, -ENODEV if the
 * requested dev_id does not exist.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* hci_dev_get() takes a reference; released via hci_dev_put() below. */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: cancel a pending auto-off. */
	hci_del_off_timer(hdev);

	/* Without a management interface the device is treated as pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	/* NOTE(review): strcpy assumes di.name is at least as large as
	 * hdev->name ("hciN") — confirm against the uapi struct. */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low nibble, device type in the high one. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
913
914/* ---- Interface to HCI drivers ---- */
915
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200916static int hci_rfkill_set_block(void *data, bool blocked)
917{
918 struct hci_dev *hdev = data;
919
920 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
921
922 if (!blocked)
923 return 0;
924
925 hci_dev_do_close(hdev);
926
927 return 0;
928}
929
/* rfkill operations: only block requests are acted upon
 * (see hci_rfkill_set_block() above). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
933
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934/* Alloc HCI device */
935struct hci_dev *hci_alloc_dev(void)
936{
937 struct hci_dev *hdev;
938
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200939 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 if (!hdev)
941 return NULL;
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 skb_queue_head_init(&hdev->driver_init);
944
945 return hdev;
946}
947EXPORT_SYMBOL(hci_alloc_dev);
948
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-supplied init frames still queued. */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
958
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200959static void hci_power_on(struct work_struct *work)
960{
961 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
962
963 BT_DBG("%s", hdev->name);
964
Brian Gixa68668b2011-08-11 15:49:36 -0700965 if (hci_dev_open(hdev->id) < 0 && !test_bit(HCI_UP, &hdev->flags))
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200966 return;
967
968 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
969 mod_timer(&hdev->off_timer,
970 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
971
972 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
973 mgmt_index_added(hdev->id);
974}
975
/* Workqueue handler: close the controller.  Queued from hci_auto_off()
 * so the shutdown runs in process context, not in the timer. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
984
985static void hci_auto_off(unsigned long data)
986{
987 struct hci_dev *hdev = (struct hci_dev *) data;
988
989 BT_DBG("%s", hdev->name);
990
991 clear_bit(HCI_AUTO_OFF, &hdev->flags);
992
993 queue_work(hdev->workqueue, &hdev->power_off);
994}
995
/* Cancel a pending automatic power-off: clear the flag first so a
 * concurrently firing hci_auto_off() becomes a no-op, then stop the
 * timer itself. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1003
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001004int hci_uuids_clear(struct hci_dev *hdev)
1005{
1006 struct list_head *p, *n;
1007
1008 list_for_each_safe(p, n, &hdev->uuids) {
1009 struct bt_uuid *uuid;
1010
1011 uuid = list_entry(p, struct bt_uuid, list);
1012
1013 list_del(p);
1014 kfree(uuid);
1015 }
1016
1017 return 0;
1018}
1019
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001020int hci_link_keys_clear(struct hci_dev *hdev)
1021{
1022 struct list_head *p, *n;
1023
1024 list_for_each_safe(p, n, &hdev->link_keys) {
1025 struct link_key *key;
1026
1027 key = list_entry(p, struct link_key, list);
1028
1029 list_del(p);
1030 kfree(key);
1031 }
1032
1033 return 0;
1034}
1035
1036struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1037{
1038 struct list_head *p;
1039
1040 list_for_each(p, &hdev->link_keys) {
1041 struct link_key *k;
1042
1043 k = list_entry(p, struct link_key, list);
1044
1045 if (bacmp(bdaddr, &k->bdaddr) == 0)
1046 return k;
1047 }
1048
1049 return NULL;
1050}
1051
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001052struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1053{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001054 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001056 list_for_each(p, &hdev->link_keys) {
1057 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001058 struct key_master_id *id;
1059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 k = list_entry(p, struct link_key, list);
1061
1062 if (k->type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001063 continue;
1064
1065 if (k->dlen != sizeof(*id))
1066 continue;
1067
1068 id = (void *) &k->data;
1069 if (id->ediv == ediv &&
1070 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1071 return k;
1072 }
1073
1074 return NULL;
1075}
1076EXPORT_SYMBOL(hci_find_ltk);
1077
1078struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1079 bdaddr_t *bdaddr, u8 type)
1080{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001081 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001083 list_for_each(p, &hdev->link_keys) {
1084 struct link_key *k;
1085
1086 k = list_entry(p, struct link_key, list);
1087
1088 if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001089 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001096int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1097 u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001098{
1099 struct link_key *key, *old_key;
Brian Gixa68668b2011-08-11 15:49:36 -07001100 struct hci_conn *conn;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001101 u8 old_key_type;
Brian Gixa68668b2011-08-11 15:49:36 -07001102 u8 bonded = 0;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001103
1104 old_key = hci_find_link_key(hdev, bdaddr);
1105 if (old_key) {
1106 old_key_type = old_key->type;
1107 key = old_key;
1108 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001109 old_key_type = 0xff;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001110 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1111 if (!key)
1112 return -ENOMEM;
1113 list_add(&key->list, &hdev->link_keys);
1114 }
1115
1116 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1117
1118 bacpy(&key->bdaddr, bdaddr);
1119 memcpy(key->val, val, 16);
Brian Gixa68668b2011-08-11 15:49:36 -07001120 key->auth = 0x01;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001121 key->type = type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001122 key->pin_len = pin_len;
1123
Brian Gixa68668b2011-08-11 15:49:36 -07001124 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
1125
1126 if (conn && (conn->auth_type > 0x01 || conn->remote_auth > 0x01))
1127 bonded = 1;
1128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001129 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001130 mgmt_new_key(hdev->id, key, bonded);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001131
1132 if (type == 0x06)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001133 key->type = old_key_type;
1134
1135 return 0;
1136}
1137
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001138int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
Brian Gixa68668b2011-08-11 15:49:36 -07001139 u8 key_size, u8 auth, __le16 ediv, u8 rand[8],
1140 u8 ltk[16])
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001141{
1142 struct link_key *key, *old_key;
1143 struct key_master_id *id;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001144
Brian Gixa68668b2011-08-11 15:49:36 -07001145 BT_DBG("%s Auth: %2.2X addr %s", hdev->name, auth, batostr(bdaddr));
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001146
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001147 old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001148 if (old_key) {
1149 key = old_key;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001150 } else {
1151 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1152 if (!key)
1153 return -ENOMEM;
1154 list_add(&key->list, &hdev->link_keys);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001155 }
1156
1157 key->dlen = sizeof(*id);
1158
1159 bacpy(&key->bdaddr, bdaddr);
1160 memcpy(key->val, ltk, sizeof(key->val));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001161 key->type = KEY_TYPE_LTK;
Vinicius Costa Gomes1fa2de32011-07-08 18:31:45 -03001162 key->pin_len = key_size;
Brian Gixa68668b2011-08-11 15:49:36 -07001163 key->auth = auth;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001164
1165 id = (void *) &key->data;
1166 id->ediv = ediv;
1167 memcpy(id->rand, rand, sizeof(id->rand));
1168
1169 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001170 mgmt_new_key(hdev->id, key, auth & 0x01);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001171
1172 return 0;
1173}
1174
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001175int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1176{
1177 struct link_key *key;
1178
1179 key = hci_find_link_key(hdev, bdaddr);
1180 if (!key)
1181 return -ENOENT;
1182
1183 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1184
1185 list_del(&key->list);
1186 kfree(key);
1187
1188 return 0;
1189}
1190
Ville Tervo6bd32322011-02-16 16:32:41 +02001191/* HCI command timer function */
1192static void hci_cmd_timer(unsigned long arg)
1193{
1194 struct hci_dev *hdev = (void *) arg;
1195
1196 BT_ERR("%s command tx timeout", hdev->name);
1197 atomic_set(&hdev->cmd_cnt, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 clear_bit(HCI_RESET, &hdev->flags);
Ville Tervo6bd32322011-02-16 16:32:41 +02001199 tasklet_schedule(&hdev->cmd_task);
1200}
1201
Szymon Janc2763eda2011-03-22 13:12:22 +01001202struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1203 bdaddr_t *bdaddr)
1204{
1205 struct oob_data *data;
1206
1207 list_for_each_entry(data, &hdev->remote_oob_data, list)
1208 if (bacmp(bdaddr, &data->bdaddr) == 0)
1209 return data;
1210
1211 return NULL;
1212}
1213
1214int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1215{
1216 struct oob_data *data;
1217
1218 data = hci_find_remote_oob_data(hdev, bdaddr);
1219 if (!data)
1220 return -ENOENT;
1221
1222 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1223
1224 list_del(&data->list);
1225 kfree(data);
1226
1227 return 0;
1228}
1229
1230int hci_remote_oob_data_clear(struct hci_dev *hdev)
1231{
1232 struct oob_data *data, *n;
1233
1234 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1235 list_del(&data->list);
1236 kfree(data);
1237 }
1238
1239 return 0;
1240}
1241
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001242static void hci_adv_clear(unsigned long arg)
1243{
1244 struct hci_dev *hdev = (void *) arg;
1245
1246 hci_adv_entries_clear(hdev);
1247}
1248
1249int hci_adv_entries_clear(struct hci_dev *hdev)
1250{
1251 struct list_head *p, *n;
1252
Brian Gixa68668b2011-08-11 15:49:36 -07001253 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001254 write_lock_bh(&hdev->adv_entries_lock);
1255
1256 list_for_each_safe(p, n, &hdev->adv_entries) {
1257 struct adv_entry *entry;
1258
1259 entry = list_entry(p, struct adv_entry, list);
1260
1261 list_del(p);
1262 kfree(entry);
1263 }
1264
1265 write_unlock_bh(&hdev->adv_entries_lock);
1266
1267 return 0;
1268}
1269
1270struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1271{
1272 struct list_head *p;
1273 struct adv_entry *res = NULL;
1274
Brian Gixa68668b2011-08-11 15:49:36 -07001275 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001276 read_lock_bh(&hdev->adv_entries_lock);
1277
1278 list_for_each(p, &hdev->adv_entries) {
1279 struct adv_entry *entry;
1280
1281 entry = list_entry(p, struct adv_entry, list);
1282
1283 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1284 res = entry;
1285 goto out;
1286 }
1287 }
1288out:
1289 read_unlock_bh(&hdev->adv_entries_lock);
1290 return res;
1291}
1292
1293static inline int is_connectable_adv(u8 evt_type)
1294{
1295 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1296 return 1;
1297
1298 return 0;
1299}
1300
Szymon Janc2763eda2011-03-22 13:12:22 +01001301int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1302 u8 *randomizer)
1303{
1304 struct oob_data *data;
1305
1306 data = hci_find_remote_oob_data(hdev, bdaddr);
1307
1308 if (!data) {
1309 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1310 if (!data)
1311 return -ENOMEM;
1312
1313 bacpy(&data->bdaddr, bdaddr);
1314 list_add(&data->list, &hdev->remote_oob_data);
1315 }
1316
1317 memcpy(data->hash, hash, sizeof(data->hash));
1318 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1319
1320 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1321
1322 return 0;
1323}
1324
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001325int hci_add_adv_entry(struct hci_dev *hdev,
1326 struct hci_ev_le_advertising_info *ev)
1327{
1328 struct adv_entry *entry;
1329
Brian Gixa68668b2011-08-11 15:49:36 -07001330 BT_DBG("");
1331
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001332 if (!is_connectable_adv(ev->evt_type))
1333 return -EINVAL;
1334
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001335 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001336 /* Only new entries should be added to adv_entries. So, if
1337 * bdaddr was found, don't add it. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001338 if (entry)
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001339 return 0;
1340
1341 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1342 if (!entry)
1343 return -ENOMEM;
1344
1345 bacpy(&entry->bdaddr, &ev->bdaddr);
1346 entry->bdaddr_type = ev->bdaddr_type;
1347
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001348 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001349 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001351
1352 return 0;
1353}
1354
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001355static struct crypto_blkcipher *alloc_cypher(void)
1356{
1357 if (enable_smp)
1358 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1359
1360 return ERR_PTR(-ENOTSUPP);
1361}
1362
Linus Torvalds1da177e2005-04-16 15:20:36 -07001363/* Register HCI device */
1364int hci_register_dev(struct hci_dev *hdev)
1365{
1366 struct list_head *head = &hci_dev_list, *p;
Marcel Holtmannef222012007-07-11 06:42:04 +02001367 int i, id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001369 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1370 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371
1372 if (!hdev->open || !hdev->close || !hdev->destruct)
1373 return -EINVAL;
1374
1375 write_lock_bh(&hci_dev_list_lock);
1376
1377 /* Find first available device id */
1378 list_for_each(p, &hci_dev_list) {
1379 if (list_entry(p, struct hci_dev, list)->id != id)
1380 break;
1381 head = p; id++;
1382 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001383
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 sprintf(hdev->name, "hci%d", id);
1385 hdev->id = id;
1386 list_add(&hdev->list, head);
1387
1388 atomic_set(&hdev->refcnt, 1);
1389 spin_lock_init(&hdev->lock);
1390
1391 hdev->flags = 0;
1392 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001393 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001395 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396
Marcel Holtmann04837f62006-07-03 10:02:33 +02001397 hdev->idle_timeout = 0;
1398 hdev->sniff_max_interval = 800;
1399 hdev->sniff_min_interval = 80;
1400
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001401 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1403 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1404
1405 skb_queue_head_init(&hdev->rx_q);
1406 skb_queue_head_init(&hdev->cmd_q);
1407 skb_queue_head_init(&hdev->raw_q);
1408
Ville Tervo6bd32322011-02-16 16:32:41 +02001409 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1410
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301411 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001412 hdev->reassembly[i] = NULL;
1413
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001415 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
1417 inquiry_cache_init(hdev);
1418
1419 hci_conn_hash_init(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001420 hci_chan_list_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
David Millerea4bd8b2010-07-30 21:54:49 -07001422 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001423
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001424 INIT_LIST_HEAD(&hdev->uuids);
1425
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001426 INIT_LIST_HEAD(&hdev->link_keys);
1427
Szymon Janc2763eda2011-03-22 13:12:22 +01001428 INIT_LIST_HEAD(&hdev->remote_oob_data);
1429
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001430 INIT_LIST_HEAD(&hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 rwlock_init(&hdev->adv_entries_lock);
1432 setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001433
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001434 INIT_WORK(&hdev->power_on, hci_power_on);
1435 INIT_WORK(&hdev->power_off, hci_power_off);
1436 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1437
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1439
1440 atomic_set(&hdev->promisc, 0);
1441
1442 write_unlock_bh(&hci_dev_list_lock);
1443
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001444 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1445 if (!hdev->workqueue)
1446 goto nomem;
1447
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001448 hdev->tfm = alloc_cypher();
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -03001449 if (IS_ERR(hdev->tfm))
1450 BT_INFO("Failed to load transform for ecb(aes): %ld",
1451 PTR_ERR(hdev->tfm));
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 hci_register_sysfs(hdev);
1454
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001455 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1456 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1457 if (hdev->rfkill) {
1458 if (rfkill_register(hdev->rfkill) < 0) {
1459 rfkill_destroy(hdev->rfkill);
1460 hdev->rfkill = NULL;
1461 }
1462 }
1463
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001464 set_bit(HCI_AUTO_OFF, &hdev->flags);
1465 set_bit(HCI_SETUP, &hdev->flags);
1466 queue_work(hdev->workqueue, &hdev->power_on);
1467
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 hci_notify(hdev, HCI_DEV_REG);
1469
1470 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001471
1472nomem:
1473 write_lock_bh(&hci_dev_list_lock);
1474 list_del(&hdev->list);
1475 write_unlock_bh(&hci_dev_list_lock);
1476
1477 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478}
1479EXPORT_SYMBOL(hci_register_dev);
1480
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up: unlinks the device,
 * closes it, frees reassembly buffers, crypto, rfkill, sysfs, timers,
 * the workqueue and all cached lists, then drops the initial reference.
 * Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt once setup/init actually finished. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	/* hdev->tfm may hold ERR_PTR if alloc_cypher() failed at register. */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop both pending timers before destroying the workqueue they
	 * would queue work onto. */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1531
/* Suspend HCI device */
/* Broadcasts HCI_DEV_SUSPEND to registered notifiers.  Always 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1539
/* Resume HCI device */
/* Broadcasts HCI_DEV_RESUME to registered notifiers.  Always 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1547
Marcel Holtmann76bca882009-11-18 00:40:39 +01001548/* Receive frame from HCI drivers */
1549int hci_recv_frame(struct sk_buff *skb)
1550{
1551 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1552 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1553 && !test_bit(HCI_INIT, &hdev->flags))) {
1554 kfree_skb(skb);
1555 return -ENXIO;
1556 }
1557
1558 /* Incomming skb */
1559 bt_cb(skb)->incoming = 1;
1560
1561 /* Time stamp */
1562 __net_timestamp(skb);
1563
1564 /* Queue frame for rx task */
1565 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001566 tasklet_schedule(&hdev->rx_task);
1567
Marcel Holtmann76bca882009-11-18 00:40:39 +01001568 return 0;
1569}
1570EXPORT_SYMBOL(hci_recv_frame);
1571
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301572static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001573 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301574{
1575 int len = 0;
1576 int hlen = 0;
1577 int remain = count;
1578 struct sk_buff *skb;
1579 struct bt_skb_cb *scb;
1580
1581 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1582 index >= NUM_REASSEMBLY)
1583 return -EILSEQ;
1584
1585 skb = hdev->reassembly[index];
1586
1587 if (!skb) {
1588 switch (type) {
1589 case HCI_ACLDATA_PKT:
1590 len = HCI_MAX_FRAME_SIZE;
1591 hlen = HCI_ACL_HDR_SIZE;
1592 break;
1593 case HCI_EVENT_PKT:
1594 len = HCI_MAX_EVENT_SIZE;
1595 hlen = HCI_EVENT_HDR_SIZE;
1596 break;
1597 case HCI_SCODATA_PKT:
1598 len = HCI_MAX_SCO_SIZE;
1599 hlen = HCI_SCO_HDR_SIZE;
1600 break;
1601 }
1602
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001603 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301604 if (!skb)
1605 return -ENOMEM;
1606
1607 scb = (void *) skb->cb;
1608 scb->expect = hlen;
1609 scb->pkt_type = type;
1610
1611 skb->dev = (void *) hdev;
1612 hdev->reassembly[index] = skb;
1613 }
1614
1615 while (count) {
1616 scb = (void *) skb->cb;
1617 len = min(scb->expect, (__u16)count);
1618
1619 memcpy(skb_put(skb, len), data, len);
1620
1621 count -= len;
1622 data += len;
1623 scb->expect -= len;
1624 remain = count;
1625
1626 switch (type) {
1627 case HCI_EVENT_PKT:
1628 if (skb->len == HCI_EVENT_HDR_SIZE) {
1629 struct hci_event_hdr *h = hci_event_hdr(skb);
1630 scb->expect = h->plen;
1631
1632 if (skb_tailroom(skb) < scb->expect) {
1633 kfree_skb(skb);
1634 hdev->reassembly[index] = NULL;
1635 return -ENOMEM;
1636 }
1637 }
1638 break;
1639
1640 case HCI_ACLDATA_PKT:
1641 if (skb->len == HCI_ACL_HDR_SIZE) {
1642 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1643 scb->expect = __le16_to_cpu(h->dlen);
1644
1645 if (skb_tailroom(skb) < scb->expect) {
1646 kfree_skb(skb);
1647 hdev->reassembly[index] = NULL;
1648 return -ENOMEM;
1649 }
1650 }
1651 break;
1652
1653 case HCI_SCODATA_PKT:
1654 if (skb->len == HCI_SCO_HDR_SIZE) {
1655 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1656 scb->expect = h->dlen;
1657
1658 if (skb_tailroom(skb) < scb->expect) {
1659 kfree_skb(skb);
1660 hdev->reassembly[index] = NULL;
1661 return -ENOMEM;
1662 }
1663 }
1664 break;
1665 }
1666
1667 if (scb->expect == 0) {
1668 /* Complete frame */
1669
1670 bt_cb(skb)->pkt_type = type;
1671 hci_recv_frame(skb);
1672
1673 hdev->reassembly[index] = NULL;
1674 return remain;
1675 }
1676 }
1677
1678 return remain;
1679}
1680
Marcel Holtmannef222012007-07-11 06:42:04 +02001681int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1682{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301683 int rem = 0;
1684
Marcel Holtmannef222012007-07-11 06:42:04 +02001685 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1686 return -EILSEQ;
1687
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001688 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001689 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301690 if (rem < 0)
1691 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001692
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301693 data += (count - rem);
1694 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001695 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001696
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301697 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001698}
1699EXPORT_SYMBOL(hci_recv_fragment);
1700
Suraj Sumangala99811512010-07-14 13:02:19 +05301701#define STREAM_REASSEMBLY 0
1702
/* Feed a raw byte stream (packet-type indicator byte followed by the
 * packet) into the shared STREAM_REASSEMBLY slot.  The type byte is
 * consumed here when a new packet starts; otherwise the in-progress
 * skb already knows its type.  Returns the last reassembly result:
 * >= 0 bytes left unconsumed, or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1735
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736/* ---- Interface to upper protocols ---- */
1737
1738/* Register/Unregister protocols.
1739 * hci_task_lock is used to ensure that no tasks are running. */
1740int hci_register_proto(struct hci_proto *hp)
1741{
1742 int err = 0;
1743
1744 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1745
1746 if (hp->id >= HCI_MAX_PROTO)
1747 return -EINVAL;
1748
1749 write_lock_bh(&hci_task_lock);
1750
1751 if (!hci_proto[hp->id])
1752 hci_proto[hp->id] = hp;
1753 else
1754 err = -EEXIST;
1755
1756 write_unlock_bh(&hci_task_lock);
1757
1758 return err;
1759}
1760EXPORT_SYMBOL(hci_register_proto);
1761
1762int hci_unregister_proto(struct hci_proto *hp)
1763{
1764 int err = 0;
1765
1766 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1767
1768 if (hp->id >= HCI_MAX_PROTO)
1769 return -EINVAL;
1770
1771 write_lock_bh(&hci_task_lock);
1772
1773 if (hci_proto[hp->id])
1774 hci_proto[hp->id] = NULL;
1775 else
1776 err = -ENOENT;
1777
1778 write_unlock_bh(&hci_task_lock);
1779
1780 return err;
1781}
1782EXPORT_SYMBOL(hci_unregister_proto);
1783
/* Add cb to the global hci_cb_list under its writer lock.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1795
/* Remove cb from the global hci_cb_list under its writer lock.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1807
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001808int hci_register_amp(struct amp_mgr_cb *cb)
1809{
1810 BT_DBG("%p", cb);
1811
1812 write_lock_bh(&amp_mgr_cb_list_lock);
1813 list_add(&cb->list, &amp_mgr_cb_list);
1814 write_unlock_bh(&amp_mgr_cb_list_lock);
1815
1816 return 0;
1817}
1818EXPORT_SYMBOL(hci_register_amp);
1819
/* Remove an AMP manager callback set from the global amp_mgr_cb_list.
 * Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1831
1832void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
1833 struct sk_buff *skb)
1834{
1835 struct amp_mgr_cb *cb;
1836
1837 BT_DBG("opcode 0x%x", opcode);
1838
1839 read_lock_bh(&amp_mgr_cb_list_lock);
1840 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1841 if (cb->amp_cmd_complete_event)
1842 cb->amp_cmd_complete_event(hdev, opcode, skb);
1843 }
1844 read_unlock_bh(&amp_mgr_cb_list_lock);
1845}
1846
1847void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
1848{
1849 struct amp_mgr_cb *cb;
1850
1851 BT_DBG("opcode 0x%x, status %d", opcode, status);
1852
1853 read_lock_bh(&amp_mgr_cb_list_lock);
1854 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1855 if (cb->amp_cmd_status_event)
1856 cb->amp_cmd_status_event(hdev, opcode, status);
1857 }
1858 read_unlock_bh(&amp_mgr_cb_list_lock);
1859}
1860
1861void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
1862 struct sk_buff *skb)
1863{
1864 struct amp_mgr_cb *cb;
1865
1866 BT_DBG("ev_code 0x%x", ev_code);
1867
1868 read_lock_bh(&amp_mgr_cb_list_lock);
1869 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1870 if (cb->amp_event)
1871 cb->amp_event(hdev, ev_code, skb);
1872 }
1873 read_unlock_bh(&amp_mgr_cb_list_lock);
1874}
1875
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876static int hci_send_frame(struct sk_buff *skb)
1877{
1878 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1879
1880 if (!hdev) {
1881 kfree_skb(skb);
1882 return -ENODEV;
1883 }
1884
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001885 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886
1887 if (atomic_read(&hdev->promisc)) {
1888 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001889 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001891 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 }
1893
1894 /* Get rid of skb owner, prior to sending to the driver. */
1895 skb_orphan(skb);
1896
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001897 hci_notify(hdev, HCI_DEV_WRITE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 return hdev->send(skb);
1899}
1900
/* Send HCI command.
 *
 * Builds a command packet (command header followed by @plen bytes of
 * @param), queues it on hdev->cmd_q and schedules the command tasklet,
 * which serialises transmission against the controller's command
 * credits.  While the controller is initialising (HCI_INIT) the opcode
 * is recorded in init_last_cmd for the init state machine.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);	/* HCI wire format is little endian */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001939void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940{
1941 struct hci_command_hdr *hdr;
1942
1943 if (!hdev->sent_cmd)
1944 return NULL;
1945
1946 hdr = (void *) hdev->sent_cmd->data;
1947
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001948 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949 return NULL;
1950
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001951 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
1953 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1954}
1955
1956/* Send ACL data */
1957static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1958{
1959 struct hci_acl_hdr *hdr;
1960 int len = skb->len;
1961
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001962 skb_push(skb, HCI_ACL_HDR_SIZE);
1963 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001964 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001965 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1966 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967}
1968
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
1970 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971{
1972 struct hci_dev *hdev = conn->hdev;
1973 struct sk_buff *list;
1974
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001975 BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001978 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001979 if (hdev->dev_type == HCI_BREDR)
1980 hci_add_acl_hdr(skb, conn->handle, flags);
1981 else
1982 hci_add_acl_hdr(skb, chan->ll_handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001984 list = skb_shinfo(skb)->frag_list;
1985 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 /* Non fragmented */
1987 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1988
1989 skb_queue_tail(&conn->data_q, skb);
1990 } else {
1991 /* Fragmented */
1992 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1993
1994 skb_shinfo(skb)->frag_list = NULL;
1995
1996 /* Queue all fragments atomically */
1997 spin_lock_bh(&conn->data_q.lock);
1998
1999 __skb_queue_tail(&conn->data_q, skb);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000 flags &= ~ACL_PB_MASK;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002001 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 do {
2003 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002004
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002006 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002007 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008
2009 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2010
2011 __skb_queue_tail(&conn->data_q, skb);
2012 } while (list);
2013
2014 spin_unlock_bh(&conn->data_q.lock);
2015 }
2016
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002017 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018}
2019EXPORT_SYMBOL(hci_send_acl);
2020
/* Send SCO data */
/* Queue one SCO frame on the connection and kick the TX tasklet.
 * The SCO header (connection handle + payload length) is pushed in
 * front of the payload; skb->dev routes the frame back to this
 * controller when the scheduler sends it.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2043
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling on 'sent'), then compute its
 * quote: the controller's free buffer count for that link type divided
 * by the number of ready connections, with a minimum of 1.
 *
 * Returns the chosen connection, or NULL with *quote set to 0 when no
 * connection of @type has data pending.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only consider established links */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0: LE shares the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2101
Ville Tervobae1f5d2011-02-10 22:38:53 -03002102static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103{
2104 struct hci_conn_hash *h = &hdev->conn_hash;
2105 struct list_head *p;
2106 struct hci_conn *c;
2107
Ville Tervobae1f5d2011-02-10 22:38:53 -03002108 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109
2110 /* Kill stalled connections */
2111 list_for_each(p, &h->list) {
2112 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002113 if (c->type == type && c->sent) {
2114 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 hdev->name, batostr(&c->dst));
2116 hci_acl_disconn(c, 0x13);
2117 }
2118 }
2119}
2120
2121static inline void hci_sched_acl(struct hci_dev *hdev)
2122{
2123 struct hci_conn *conn;
2124 struct sk_buff *skb;
2125 int quote;
2126
2127 BT_DBG("%s", hdev->name);
2128
2129 if (!test_bit(HCI_RAW, &hdev->flags)) {
2130 /* ACL tx timeout must be longer than maximum
2131 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002132 if (hdev->acl_cnt <= 0 &&
2133 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002134 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 }
2136
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002137 while (hdev->acl_cnt > 0 &&
2138 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2139 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2140 int count = 1;
2141
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002143
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002144 if (hdev->flow_ctl_mode ==
2145 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2146 /* Calculate count of blocks used by
2147 * this packet
2148 */
2149 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2150 hdev->data_block_len) + 1;
2151
2152 if (count > hdev->acl_cnt)
2153 return;
2154
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002155 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002156
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157 hci_send_frame(skb);
2158 hdev->acl_last_tx = jiffies;
2159
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002160 hdev->acl_cnt -= count;
2161 quote -= count;
2162
2163 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 }
2165 }
2166}
2167
2168/* Schedule SCO */
2169static inline void hci_sched_sco(struct hci_dev *hdev)
2170{
2171 struct hci_conn *conn;
2172 struct sk_buff *skb;
2173 int quote;
2174
2175 BT_DBG("%s", hdev->name);
2176
2177 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2178 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2179 BT_DBG("skb %p len %d", skb, skb->len);
2180 hci_send_frame(skb);
2181
2182 conn->sent++;
2183 if (conn->sent == ~0)
2184 conn->sent = 0;
2185 }
2186 }
2187}
2188
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002189static inline void hci_sched_esco(struct hci_dev *hdev)
2190{
2191 struct hci_conn *conn;
2192 struct sk_buff *skb;
2193 int quote;
2194
2195 BT_DBG("%s", hdev->name);
2196
2197 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2198 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2199 BT_DBG("skb %p len %d", skb, skb->len);
2200 hci_send_frame(skb);
2201
2202 conn->sent++;
2203 if (conn->sent == ~0)
2204 conn->sent = 0;
2205 }
2206 }
2207}
2208
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002209static inline void hci_sched_le(struct hci_dev *hdev)
2210{
2211 struct hci_conn *conn;
2212 struct sk_buff *skb;
2213 int quote, cnt;
2214
2215 BT_DBG("%s", hdev->name);
2216
2217 if (!test_bit(HCI_RAW, &hdev->flags)) {
2218 /* LE tx timeout must be longer than maximum
2219 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03002220 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002221 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002222 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002223 }
2224
2225 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2226 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2227 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2228 BT_DBG("skb %p len %d", skb, skb->len);
2229
2230 hci_send_frame(skb);
2231 hdev->le_last_tx = jiffies;
2232
2233 cnt--;
2234 conn->sent++;
2235 }
2236 }
2237 if (hdev->le_pkts)
2238 hdev->le_cnt = cnt;
2239 else
2240 hdev->acl_cnt = cnt;
2241}
2242
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243static void hci_tx_task(unsigned long arg)
2244{
2245 struct hci_dev *hdev = (struct hci_dev *) arg;
2246 struct sk_buff *skb;
2247
2248 read_lock(&hci_task_lock);
2249
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002250 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2251 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252
2253 /* Schedule queues and send stuff to HCI driver */
2254
2255 hci_sched_acl(hdev);
2256
2257 hci_sched_sco(hdev);
2258
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002259 hci_sched_esco(hdev);
2260
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002261 hci_sched_le(hdev);
2262
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 /* Send next queued raw (unknown type) packet */
2264 while ((skb = skb_dequeue(&hdev->raw_q)))
2265 hci_send_frame(skb);
2266
2267 read_unlock(&hci_task_lock);
2268}
2269
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002270/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
2272/* ACL data packet */
2273static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2274{
2275 struct hci_acl_hdr *hdr = (void *) skb->data;
2276 struct hci_conn *conn;
2277 __u16 handle, flags;
2278
2279 skb_pull(skb, HCI_ACL_HDR_SIZE);
2280
2281 handle = __le16_to_cpu(hdr->handle);
2282 flags = hci_flags(handle);
2283 handle = hci_handle(handle);
2284
2285 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2286
2287 hdev->stat.acl_rx++;
2288
2289 hci_dev_lock(hdev);
2290 conn = hci_conn_hash_lookup_handle(hdev, handle);
2291 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002292
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 if (conn) {
2294 register struct hci_proto *hp;
2295
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002296 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002299 hp = hci_proto[HCI_PROTO_L2CAP];
2300 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 hp->recv_acldata(conn, skb, flags);
2302 return;
2303 }
2304 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002305 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 hdev->name, handle);
2307 }
2308
2309 kfree_skb(skb);
2310}
2311
/* SCO data packet */
/* Unpack the SCO header, look up the owning connection and hand the
 * payload to the SCO protocol layer, which takes ownership of the skb.
 * Frames for unknown handles, or with no SCO receiver registered, are
 * logged and/or freed here.
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* skb ownership passes to the SCO layer */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2347
Marcel Holtmann65164552005-10-28 19:20:48 +02002348static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002349{
2350 struct hci_dev *hdev = (struct hci_dev *) arg;
2351 struct sk_buff *skb;
2352
2353 BT_DBG("%s", hdev->name);
2354
2355 read_lock(&hci_task_lock);
2356
2357 while ((skb = skb_dequeue(&hdev->rx_q))) {
2358 if (atomic_read(&hdev->promisc)) {
2359 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002360 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 }
2362
2363 if (test_bit(HCI_RAW, &hdev->flags)) {
2364 kfree_skb(skb);
2365 continue;
2366 }
2367
2368 if (test_bit(HCI_INIT, &hdev->flags)) {
2369 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002370 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 case HCI_ACLDATA_PKT:
2372 case HCI_SCODATA_PKT:
2373 kfree_skb(skb);
2374 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002375 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376 }
2377
2378 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002379 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002380 case HCI_EVENT_PKT:
2381 hci_event_packet(hdev, skb);
2382 break;
2383
2384 case HCI_ACLDATA_PKT:
2385 BT_DBG("%s ACL data packet", hdev->name);
2386 hci_acldata_packet(hdev, skb);
2387 break;
2388
2389 case HCI_SCODATA_PKT:
2390 BT_DBG("%s SCO data packet", hdev->name);
2391 hci_scodata_packet(hdev, skb);
2392 break;
2393
2394 default:
2395 kfree_skb(skb);
2396 break;
2397 }
2398 }
2399
2400 read_unlock(&hci_task_lock);
2401}
2402
/* CMD tasklet: when the controller has command credits, dequeue the
 * next command, keep a clone in hdev->sent_cmd (so reply handlers can
 * inspect the original parameters), send the frame and arm the command
 * timeout.  If the clone allocation fails the command is requeued at
 * the head and the tasklet rescheduled to retry.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd clone, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002430
/* Runtime switch for Security Manager Protocol support on LE links,
 * settable at module load and via sysfs (mode 0644). */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");