blob: 2528c7f5f5d530d02818a79515049119d6cb0596 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Auto-power-off delay in milliseconds (used by the mgmt auto-off timer). */
#define AUTO_OFF_TIMEOUT 2000

/* Forward declarations for the three per-device tasklet bodies. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the rx/tx/cmd task paths against concurrent proto changes. */
static DEFINE_RWLOCK(hci_task_lock);

/* Module toggle for Security Manager Protocol support.
 * NOTE(review): not referenced anywhere in this chunk — presumably used
 * further down the file; verify before removing. */
static int enable_smp = 1;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* HCI protocols (slots for e.g. L2CAP/SCO upper layers) */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list: atomic chain, safe to fire from non-process context. */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
84/* ---- HCI notifications ---- */
85
/* Register a callback on the global HCI event notifier chain. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

/* Fire the notifier chain for an HCI device event (HCI_DEV_UP etc.).
 * Atomic chain: callbacks must not sleep. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
100
101/* ---- HCI requests ---- */
102
/* Complete the pending synchronous request, waking the waiter in
 * __hci_request(). @cmd identifies which HCI command finished; @result
 * is its status code. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		/* Publish the result before flipping the status the waiter
		 * checks, then wake it. */
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort the pending synchronous request with error @err (a positive
 * errno value; __hci_request() negates it). */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
130
131/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which issues HCI commands) and sleeps interruptibly until
 * hci_req_complete()/hci_req_cancel() wakes us or @timeout (in jiffies)
 * expires. Caller must hold the request lock (see hci_request()).
 * Returns 0 on success, negative errno on failure/timeout/signal. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before issuing the request so a fast completion
	 * cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return without resetting
	 * hdev->req_status, leaving it HCI_REQ_PEND — presumably benign
	 * because the next request overwrites it; verify. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the HCI status byte to a negative errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

/* Serialized wrapper around __hci_request(); refuses when the device
 * is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
189
/* Request callback: issue HCI_Reset. HCI_RESET flag is cleared by the
 * command-complete path elsewhere in the file. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
198
/* Request callback: queue the mandatory controller bring-up command
 * sequence. Issue order matters — the init phase tracks the last queued
 * command (see hci_req_complete()). BR/EDR and AMP controllers get
 * different tails. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-provided init packets into
	 * the command queue first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Set default HCI Flow Control Mode: packet-based for BR/EDR,
	 * block-based for AMP controllers. */
	if (hdev->dev_type == HCI_BREDR)
		hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
	else
		hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;

	/* Read HCI Flow Control Mode */
	hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Data Block Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	if (hdev->dev_type == HCI_BREDR) {
		/* BR-EDR initialization */

		/* Read Local Supported Features */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

		/* Read BD Address */
		hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

		/* Read Class of Device */
		hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

		/* Read Local Name */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

		/* Read Voice Setting */
		hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

		/* Optional initialization */
		/* Clear Event Filters */
		flt_type = HCI_FLT_CLEAR_ALL;
		hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

		/* Connection accept timeout ~20 secs (0x7d00 slots) */
		param = cpu_to_le16(0x7d00);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Drop all stored link keys from the controller. */
		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 1;
		hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
				sizeof(cp), &cp);
	} else {
		/* AMP initialization */
		/* Connection accept timeout ~5 secs (0x1f40 slots) */
		param = cpu_to_le16(0x1f40);
		hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

		/* Read AMP Info */
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
	}
}
300
/* Request callback: LE-specific init — query LE ACL buffer counts. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

/* Request callback: write scan enable. @opt carries the scan bitmask. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

/* Request callback: write authentication enable. @opt is the new mode. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

/* Request callback: write encryption mode. @opt is the new mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

/* Request callback: write default link policy. @opt is the policy word. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
348
/* Get HCI device by index.
 * Device is held on return; caller must hci_dev_put() it.
 * Returns NULL when no device with that index exists. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			/* Take the reference while still holding the list
			 * lock so the device cannot vanish underneath us. */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
/* ---- Inquiry support ---- */

/* Free every entry in the inquiry cache. Callers are expected to hold
 * the device lock (all call sites here wrap with hci_dev_lock_bh). */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

/* Find the cache entry for @bdaddr, or NULL. Linear scan of the
 * singly linked entry list. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

/* Insert or refresh an inquiry result. New entries are pushed at the
 * list head; allocation failure is silently ignored (best-effort cache). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
424
425static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
426{
427 struct inquiry_cache *cache = &hdev->inq_cache;
428 struct inquiry_info *info = (struct inquiry_info *) buf;
429 struct inquiry_entry *e;
430 int copied = 0;
431
432 for (e = cache->list; e && copied < num; e = e->next, copied++) {
433 struct inquiry_data *data = &e->data;
434 bacpy(&info->bdaddr, &data->bdaddr);
435 info->pscan_rep_mode = data->pscan_rep_mode;
436 info->pscan_period_mode = data->pscan_period_mode;
437 info->pscan_mode = data->pscan_mode;
438 memcpy(info->dev_class, data->dev_class, 3);
439 info->clock_offset = data->clock_offset;
440 info++;
441 }
442
443 BT_DBG("cache %p, copied %d", cache, copied);
444 return copied;
445}
446
/* Request callback: start an inquiry using the parameters packed into
 * @opt (a struct hci_inquiry_req pointer). No-op if one is running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
463
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry, then copy
 * the cached results back to userspace after the request struct itself.
 * Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Re-inquire when the cache is stale, empty, or the caller asked
	 * for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units per the HCI spec; ~2000ms each. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
529
530/* ---- HCI ioctl helpers ---- */
531
532int hci_dev_open(__u16 dev)
533{
534 struct hci_dev *hdev;
535 int ret = 0;
536
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200537 hdev = hci_dev_get(dev);
538 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 return -ENODEV;
540
541 BT_DBG("%s %p", hdev->name, hdev);
542
543 hci_req_lock(hdev);
544
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200545 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
546 ret = -ERFKILL;
547 goto done;
548 }
549
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 if (test_bit(HCI_UP, &hdev->flags)) {
551 ret = -EALREADY;
552 goto done;
553 }
554
555 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
556 set_bit(HCI_RAW, &hdev->flags);
557
558 if (hdev->open(hdev)) {
559 ret = -EIO;
560 goto done;
561 }
562
563 if (!test_bit(HCI_RAW, &hdev->flags)) {
564 atomic_set(&hdev->cmd_cnt, 1);
565 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200566 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567
Marcel Holtmann04837f62006-07-03 10:02:33 +0200568 ret = __hci_request(hdev, hci_init_req, 0,
569 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700571 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300572 ret = __hci_request(hdev, hci_le_init_req, 0,
573 msecs_to_jiffies(HCI_INIT_TIMEOUT));
574
Linus Torvalds1da177e2005-04-16 15:20:36 -0700575 clear_bit(HCI_INIT, &hdev->flags);
576 }
577
578 if (!ret) {
579 hci_dev_hold(hdev);
580 set_bit(HCI_UP, &hdev->flags);
581 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200582 if (!test_bit(HCI_SETUP, &hdev->flags))
583 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900584 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 /* Init failed, cleanup */
586 tasklet_kill(&hdev->rx_task);
587 tasklet_kill(&hdev->tx_task);
588 tasklet_kill(&hdev->cmd_task);
589
590 skb_queue_purge(&hdev->cmd_q);
591 skb_queue_purge(&hdev->rx_q);
592
593 if (hdev->flush)
594 hdev->flush(hdev);
595
596 if (hdev->sent_cmd) {
597 kfree_skb(hdev->sent_cmd);
598 hdev->sent_cmd = NULL;
599 }
600
601 hdev->close(hdev);
602 hdev->flags = 0;
603 }
604
605done:
606 hci_req_unlock(hdev);
607 hci_dev_put(hdev);
608 return ret;
609}
610
/* Bring a device down: cancel any pending request, stop the tasklets,
 * flush caches and connections, soft-reset the controller, drain all
 * queues and close the transport. Ordering is deliberate — RX/TX die
 * before the reset request, the cmd task dies after it. Always
 * returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just make sure no command timer fires. */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		/* Short 250ms timeout: best-effort reset on the way down. */
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up. */
	hci_dev_put(hdev);
	return 0;
}
677
678int hci_dev_close(__u16 dev)
679{
680 struct hci_dev *hdev;
681 int err;
682
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200683 hdev = hci_dev_get(dev);
684 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 return -ENODEV;
686 err = hci_dev_do_close(hdev);
687 hci_dev_put(hdev);
688 return err;
689}
690
/* HCIDEVRESET ioctl backend: flush queues, caches and connections, then
 * issue HCI_Reset (unless HCI_RAW). TX tasklet is disabled for the
 * duration so nothing transmits mid-reset. Returns 0 or negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and zero all per-link-type quotas. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
731
732int hci_dev_reset_stat(__u16 dev)
733{
734 struct hci_dev *hdev;
735 int ret = 0;
736
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200737 hdev = hci_dev_get(dev);
738 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 return -ENODEV;
740
741 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
742
743 hci_dev_put(hdev);
744
745 return ret;
746}
747
/* Dispatch the simple per-device HCISET* ioctls: copy the request from
 * userspace, apply the setting (some via synchronous HCI requests, some
 * by writing hdev fields directly). Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two 16-bit values: high half = MTU, low half =
	 * packet count. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
822
/* HCIGETDEVLIST ioctl backend: return up to the caller-supplied number
 * of (dev_id, flags) pairs for all registered devices.
 * Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages' worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Legacy (raw-ioctl) access cancels any pending auto-off
		 * and marks non-mgmt devices pairable. */
		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
872
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * one device and copy it back to userspace. Returns 0 or -EFAULT/-ENODEV. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy access: cancel auto-off and mark pairable (same policy
	 * as hci_get_dev_list()). */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
913
914/* ---- Interface to HCI drivers ---- */
915
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200916static int hci_rfkill_set_block(void *data, bool blocked)
917{
918 struct hci_dev *hdev = data;
919
920 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
921
922 if (!blocked)
923 return 0;
924
925 hci_dev_do_close(hdev);
926
927 return 0;
928}
929
/* rfkill operations: only the block/unblock transition needs handling */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
933
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934/* Alloc HCI device */
935struct hci_dev *hci_alloc_dev(void)
936{
937 struct hci_dev *hdev;
938
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200939 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 if (!hdev)
941 return NULL;
942
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 skb_queue_head_init(&hdev->driver_init);
944
945 return hdev;
946}
947EXPORT_SYMBOL(hci_alloc_dev);
948
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver init frames that were never sent */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
958
/* Deferred power-on work: bring the adapter up and, when it was powered
 * on automatically, arm the auto power-off timer. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* Give up if the open failed and the device never came up */
	if (hci_dev_open(hdev->id) < 0 && !test_bit(HCI_UP, &hdev->flags))
		return;

	/* Auto-powered adapters are shut down again unless claimed
	 * within AUTO_OFF_TIMEOUT ms (see hci_auto_off) */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First power-on ends setup: announce the controller to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
975
/* Deferred power-off work: close the adapter from process context,
 * where hci_dev_close() is allowed to sleep. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
984
/* Auto power-off timer callback.  Runs in timer (softirq) context, so
 * the actual close is deferred to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
995
/* Cancel a pending auto power-off: clear the auto-off flag and stop
 * the timer so the adapter stays up. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1003
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001004int hci_uuids_clear(struct hci_dev *hdev)
1005{
1006 struct list_head *p, *n;
1007
1008 list_for_each_safe(p, n, &hdev->uuids) {
1009 struct bt_uuid *uuid;
1010
1011 uuid = list_entry(p, struct bt_uuid, list);
1012
1013 list_del(p);
1014 kfree(uuid);
1015 }
1016
1017 return 0;
1018}
1019
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001020int hci_link_keys_clear(struct hci_dev *hdev)
1021{
1022 struct list_head *p, *n;
1023
1024 list_for_each_safe(p, n, &hdev->link_keys) {
1025 struct link_key *key;
1026
1027 key = list_entry(p, struct link_key, list);
1028
1029 list_del(p);
1030 kfree(key);
1031 }
1032
1033 return 0;
1034}
1035
1036struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1037{
1038 struct list_head *p;
1039
1040 list_for_each(p, &hdev->link_keys) {
1041 struct link_key *k;
1042
1043 k = list_entry(p, struct link_key, list);
1044
1045 if (bacmp(bdaddr, &k->bdaddr) == 0)
1046 return k;
1047 }
1048
1049 return NULL;
1050}
1051
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001052struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1053{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001054 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001055
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001056 list_for_each(p, &hdev->link_keys) {
1057 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001058 struct key_master_id *id;
1059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001060 k = list_entry(p, struct link_key, list);
1061
1062 if (k->type != KEY_TYPE_LTK)
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001063 continue;
1064
1065 if (k->dlen != sizeof(*id))
1066 continue;
1067
1068 id = (void *) &k->data;
1069 if (id->ediv == ediv &&
1070 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1071 return k;
1072 }
1073
1074 return NULL;
1075}
1076EXPORT_SYMBOL(hci_find_ltk);
1077
1078struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1079 bdaddr_t *bdaddr, u8 type)
1080{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001081 struct list_head *p;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001083 list_for_each(p, &hdev->link_keys) {
1084 struct link_key *k;
1085
1086 k = list_entry(p, struct link_key, list);
1087
1088 if ((k->type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001089 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001090 }
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001091
1092 return NULL;
1093}
1094EXPORT_SYMBOL(hci_find_link_key_type);
1095
/* Store (or update) a link key for a remote device and, for new keys,
 * notify the management interface.  The live connection to the peer,
 * if any, decides whether the pairing is reported as bonded.  Keys of
 * type 0x06 revert to the previously stored type after notification
 * (0xff when there was no previous key). */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	/* Reuse the existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	/* NOTE(review): auth is hard-coded to 0x01 here — confirm intent */
	key->auth = 0x01;
	key->type = type;
	key->pin_len = pin_len;

	/* Derive "bonded" from the connection's authentication state:
	 * either side requiring more than no-bonding (> 0x01) counts */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);

	if (conn) {
		if (conn->remote_auth > 0x01)
			bonded = 1;
		else if (conn->auth_initiator && conn->auth_type > 0x01)
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* Type 0x06 keys keep the old stored type */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1141
/* Store (or update) an LE Long Term Key together with its master ID
 * (EDIV/Rand).  When new_key is set, the management interface is
 * notified; bit 0 of auth is passed as the bonded flag. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, u8 auth, __le16 ediv, u8 rand[8],
			u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s", hdev->name, auth, batostr(bdaddr));

	/* Reuse the existing LTK entry for this address when present */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		/* Space for the master ID is allocated behind the key */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = KEY_TYPE_LTK;
	/* The encryption key size is stored in the pin_len field */
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1178
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001179int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180{
1181 struct link_key *key;
1182
1183 key = hci_find_link_key(hdev, bdaddr);
1184 if (!key)
1185 return -ENOENT;
1186
1187 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1188
1189 list_del(&key->list);
1190 kfree(key);
1191
1192 return 0;
1193}
1194
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered: allow one new command and kick
	 * the command task so the queue does not stall forever */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1205
Szymon Janc2763eda2011-03-22 13:12:22 +01001206struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1207 bdaddr_t *bdaddr)
1208{
1209 struct oob_data *data;
1210
1211 list_for_each_entry(data, &hdev->remote_oob_data, list)
1212 if (bacmp(bdaddr, &data->bdaddr) == 0)
1213 return data;
1214
1215 return NULL;
1216}
1217
1218int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1219{
1220 struct oob_data *data;
1221
1222 data = hci_find_remote_oob_data(hdev, bdaddr);
1223 if (!data)
1224 return -ENOENT;
1225
1226 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1227
1228 list_del(&data->list);
1229 kfree(data);
1230
1231 return 0;
1232}
1233
1234int hci_remote_oob_data_clear(struct hci_dev *hdev)
1235{
1236 struct oob_data *data, *n;
1237
1238 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1239 list_del(&data->list);
1240 kfree(data);
1241 }
1242
1243 return 0;
1244}
1245
/* Advertising-cache expiry timer callback: flush all cached entries.
 * Runs in timer (softirq) context. */
static void hci_adv_clear(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;

	hci_adv_entries_clear(hdev);
}
1252
1253int hci_adv_entries_clear(struct hci_dev *hdev)
1254{
1255 struct list_head *p, *n;
1256
Brian Gixa68668b2011-08-11 15:49:36 -07001257 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001258 write_lock_bh(&hdev->adv_entries_lock);
1259
1260 list_for_each_safe(p, n, &hdev->adv_entries) {
1261 struct adv_entry *entry;
1262
1263 entry = list_entry(p, struct adv_entry, list);
1264
1265 list_del(p);
1266 kfree(entry);
1267 }
1268
1269 write_unlock_bh(&hdev->adv_entries_lock);
1270
1271 return 0;
1272}
1273
1274struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1275{
1276 struct list_head *p;
1277 struct adv_entry *res = NULL;
1278
Brian Gixa68668b2011-08-11 15:49:36 -07001279 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001280 read_lock_bh(&hdev->adv_entries_lock);
1281
1282 list_for_each(p, &hdev->adv_entries) {
1283 struct adv_entry *entry;
1284
1285 entry = list_entry(p, struct adv_entry, list);
1286
1287 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1288 res = entry;
1289 goto out;
1290 }
1291 }
1292out:
1293 read_unlock_bh(&hdev->adv_entries_lock);
1294 return res;
1295}
1296
1297static inline int is_connectable_adv(u8 evt_type)
1298{
1299 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1300 return 1;
1301
1302 return 0;
1303}
1304
Szymon Janc2763eda2011-03-22 13:12:22 +01001305int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1306 u8 *randomizer)
1307{
1308 struct oob_data *data;
1309
1310 data = hci_find_remote_oob_data(hdev, bdaddr);
1311
1312 if (!data) {
1313 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1314 if (!data)
1315 return -ENOMEM;
1316
1317 bacpy(&data->bdaddr, bdaddr);
1318 list_add(&data->list, &hdev->remote_oob_data);
1319 }
1320
1321 memcpy(data->hash, hash, sizeof(data->hash));
1322 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1323
1324 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1325
1326 return 0;
1327}
1328
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001329int hci_add_adv_entry(struct hci_dev *hdev,
1330 struct hci_ev_le_advertising_info *ev)
1331{
1332 struct adv_entry *entry;
1333
Brian Gixa68668b2011-08-11 15:49:36 -07001334 BT_DBG("");
1335
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001336 if (!is_connectable_adv(ev->evt_type))
1337 return -EINVAL;
1338
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001339 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001340 /* Only new entries should be added to adv_entries. So, if
1341 * bdaddr was found, don't add it. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001342 if (entry)
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001343 return 0;
1344
1345 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1346 if (!entry)
1347 return -ENOMEM;
1348
1349 bacpy(&entry->bdaddr, &ev->bdaddr);
1350 entry->bdaddr_type = ev->bdaddr_type;
1351
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352 write_lock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001353 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354 write_unlock(&hdev->adv_entries_lock);
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001355
1356 return 0;
1357}
1358
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001359static struct crypto_blkcipher *alloc_cypher(void)
1360{
1361 if (enable_smp)
1362 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1363
1364 return ERR_PTR(-ENOTSUPP);
1365}
1366
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert behind 'head' so the list stays sorted by id */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP cipher is optional: log but carry on without it
	 * (hdev->tfm holds an ERR_PTR in that case) */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the adapter on automatically; hci_power_on arms the
	 * auto-off timer and finishes the setup phase */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1484
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	/* tfm may hold an ERR_PTR when SMP is disabled (see alloc_cypher) */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop pending timers before tearing down remaining state */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1535
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; the driver does the actual suspend */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1543
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; the driver does the actual resume */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1551
Marcel Holtmann76bca882009-11-18 00:40:39 +01001552/* Receive frame from HCI drivers */
1553int hci_recv_frame(struct sk_buff *skb)
1554{
1555 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1556 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1557 && !test_bit(HCI_INIT, &hdev->flags))) {
1558 kfree_skb(skb);
1559 return -ENXIO;
1560 }
1561
1562 /* Incomming skb */
1563 bt_cb(skb)->incoming = 1;
1564
1565 /* Time stamp */
1566 __net_timestamp(skb);
1567
1568 /* Queue frame for rx task */
1569 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001570 tasklet_schedule(&hdev->rx_task);
1571
Marcel Holtmann76bca882009-11-18 00:40:39 +01001572 return 0;
1573}
1574EXPORT_SYMBOL(hci_recv_frame);
1575
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream into hdev->reassembly[index].
 *
 * Returns the number of input bytes NOT consumed (0 means all of data
 * was used), or a negative error: -EILSEQ for a bad type/index,
 * -ENOMEM on allocation failure (the partial skb is dropped). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the largest
		 * possible payload of this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing;
		 * initially just the packet header */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the real payload
		 * length from it and extend the expectation */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1684
Marcel Holtmannef222012007-07-11 06:42:04 +02001685int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1686{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301687 int rem = 0;
1688
Marcel Holtmannef222012007-07-11 06:42:04 +02001689 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1690 return -EILSEQ;
1691
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001692 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001693 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301694 if (rem < 0)
1695 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001696
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301697 data += (count - rem);
1698 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001699 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001700
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301701 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001702}
1703EXPORT_SYMBOL(hci_recv_fragment);
1704
/* Reassembly slot used for type-prefixed byte streams */
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream in which each packet is
 * prefixed by a one-byte packet-type indicator (e.g. UART transports).
 * Returns the leftover byte count (>= 0) or a negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip the consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740/* ---- Interface to upper protocols ---- */
1741
1742/* Register/Unregister protocols.
1743 * hci_task_lock is used to ensure that no tasks are running. */
1744int hci_register_proto(struct hci_proto *hp)
1745{
1746 int err = 0;
1747
1748 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1749
1750 if (hp->id >= HCI_MAX_PROTO)
1751 return -EINVAL;
1752
1753 write_lock_bh(&hci_task_lock);
1754
1755 if (!hci_proto[hp->id])
1756 hci_proto[hp->id] = hp;
1757 else
1758 err = -EEXIST;
1759
1760 write_unlock_bh(&hci_task_lock);
1761
1762 return err;
1763}
1764EXPORT_SYMBOL(hci_register_proto);
1765
1766int hci_unregister_proto(struct hci_proto *hp)
1767{
1768 int err = 0;
1769
1770 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1771
1772 if (hp->id >= HCI_MAX_PROTO)
1773 return -EINVAL;
1774
1775 write_lock_bh(&hci_task_lock);
1776
1777 if (hci_proto[hp->id])
1778 hci_proto[hp->id] = NULL;
1779 else
1780 err = -ENOENT;
1781
1782 write_unlock_bh(&hci_task_lock);
1783
1784 return err;
1785}
1786EXPORT_SYMBOL(hci_unregister_proto);
1787
/* Add a callback set to the global HCI callback list.  Always 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1799
/* Remove a callback set from the global HCI callback list.  Always 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1811
/* Add an AMP manager callback set to the global list.  Always 0. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1823
/* Remove an AMP manager callback set from the global list.  Always 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1835
1836void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
1837 struct sk_buff *skb)
1838{
1839 struct amp_mgr_cb *cb;
1840
1841 BT_DBG("opcode 0x%x", opcode);
1842
1843 read_lock_bh(&amp_mgr_cb_list_lock);
1844 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1845 if (cb->amp_cmd_complete_event)
1846 cb->amp_cmd_complete_event(hdev, opcode, skb);
1847 }
1848 read_unlock_bh(&amp_mgr_cb_list_lock);
1849}
1850
1851void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
1852{
1853 struct amp_mgr_cb *cb;
1854
1855 BT_DBG("opcode 0x%x, status %d", opcode, status);
1856
1857 read_lock_bh(&amp_mgr_cb_list_lock);
1858 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1859 if (cb->amp_cmd_status_event)
1860 cb->amp_cmd_status_event(hdev, opcode, status);
1861 }
1862 read_unlock_bh(&amp_mgr_cb_list_lock);
1863}
1864
1865void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
1866 struct sk_buff *skb)
1867{
1868 struct amp_mgr_cb *cb;
1869
1870 BT_DBG("ev_code 0x%x", ev_code);
1871
1872 read_lock_bh(&amp_mgr_cb_list_lock);
1873 list_for_each_entry(cb, &amp_mgr_cb_list, list) {
1874 if (cb->amp_event)
1875 cb->amp_event(hdev, ev_code, skb);
1876 }
1877 read_unlock_bh(&amp_mgr_cb_list_lock);
1878}
1879
/* Hand a frame to the driver's send callback.  Consumes the skb;
 * returns -ENODEV when the skb carries no device, otherwise the
 * driver's return value. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to raw HCI sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1904
1905/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001906int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
1908 int len = HCI_COMMAND_HDR_SIZE + plen;
1909 struct hci_command_hdr *hdr;
1910 struct sk_buff *skb;
1911
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001912 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914 skb = bt_skb_alloc(len, GFP_ATOMIC);
1915 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001916 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return -ENOMEM;
1918 }
1919
1920 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001921 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 hdr->plen = plen;
1923
1924 if (plen)
1925 memcpy(skb_put(skb, plen), param, plen);
1926
1927 BT_DBG("skb len %d", skb->len);
1928
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001929 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001931
Johan Hedberga5040ef2011-01-10 13:28:59 +02001932 if (test_bit(HCI_INIT, &hdev->flags))
1933 hdev->init_last_cmd = opcode;
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001936 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 return 0;
1939}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001940EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
1942/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001943void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 struct hci_command_hdr *hdr;
1946
1947 if (!hdev->sent_cmd)
1948 return NULL;
1949
1950 hdr = (void *) hdev->sent_cmd->data;
1951
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001952 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return NULL;
1954
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001955 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
1957 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1958}
1959
1960/* Send ACL data */
1961static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1962{
1963 struct hci_acl_hdr *hdr;
1964 int len = skb->len;
1965
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001966 skb_push(skb, HCI_ACL_HDR_SIZE);
1967 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001968 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001969 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1970 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971}
1972
/* Queue ACL data for transmission on @conn.
 *
 * For BR/EDR devices the ACL header carries the connection handle;
 * otherwise (AMP) it carries the logical link handle from @chan.
 * A fragmented skb (frag_list) is flattened into individual frames:
 * continuation fragments get ACL_CONT in the packet-boundary field
 * and all pieces are queued atomically under the data_q lock so the
 * TX tasklet never sees a partial PDU.  Transmission itself happens
 * in the TX tasklet.
 */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Remaining fragments are continuations of the PDU. */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2024
2025/* Send SCO data */
/* Queue SCO data for transmission on @conn.
 *
 * Prepends the SCO header (connection handle and data length) and
 * queues the frame on the connection's data queue; transmission
 * happens in the TX tasklet.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2047
2048/* ---- HCI TX task (outgoing data) ---- */
2049
2050/* HCI Connection scheduler */
2051static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2052{
2053 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02002054 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 int num = 0, min = ~0;
2056 struct list_head *p;
2057
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002058 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 * added and removed with TX task disabled. */
2060 list_for_each(p, &h->list) {
2061 struct hci_conn *c;
2062 c = list_entry(p, struct hci_conn, list);
2063
Marcel Holtmann769be972008-07-14 20:13:49 +02002064 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002066
2067 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2068 continue;
2069
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 num++;
2071
2072 if (c->sent < min) {
2073 min = c->sent;
2074 conn = c;
2075 }
2076 }
2077
2078 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002079 int cnt, q;
2080
2081 switch (conn->type) {
2082 case ACL_LINK:
2083 cnt = hdev->acl_cnt;
2084 break;
2085 case SCO_LINK:
2086 case ESCO_LINK:
2087 cnt = hdev->sco_cnt;
2088 break;
2089 case LE_LINK:
2090 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2091 break;
2092 default:
2093 cnt = 0;
2094 BT_ERR("Unknown link type");
2095 }
2096
2097 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 *quote = q ? q : 1;
2099 } else
2100 *quote = 0;
2101
2102 BT_DBG("conn %p quote %d", conn, *quote);
2103 return conn;
2104}
2105
Ville Tervobae1f5d2011-02-10 22:38:53 -03002106static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107{
2108 struct hci_conn_hash *h = &hdev->conn_hash;
2109 struct list_head *p;
2110 struct hci_conn *c;
2111
Ville Tervobae1f5d2011-02-10 22:38:53 -03002112 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 /* Kill stalled connections */
2115 list_for_each(p, &h->list) {
2116 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002117 if (c->type == type && c->sent) {
2118 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 hdev->name, batostr(&c->dst));
2120 hci_acl_disconn(c, 0x13);
2121 }
2122 }
2123}
2124
2125static inline void hci_sched_acl(struct hci_dev *hdev)
2126{
2127 struct hci_conn *conn;
2128 struct sk_buff *skb;
2129 int quote;
2130
2131 BT_DBG("%s", hdev->name);
2132
2133 if (!test_bit(HCI_RAW, &hdev->flags)) {
2134 /* ACL tx timeout must be longer than maximum
2135 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002136 if (hdev->acl_cnt <= 0 &&
2137 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002138 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 }
2140
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141 while (hdev->acl_cnt > 0 &&
2142 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2143 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2144 int count = 1;
2145
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002147
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002148 if (hdev->flow_ctl_mode ==
2149 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2150 /* Calculate count of blocks used by
2151 * this packet
2152 */
2153 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2154 hdev->data_block_len) + 1;
2155
2156 if (count > hdev->acl_cnt)
2157 return;
2158
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002159 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002160
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 hci_send_frame(skb);
2162 hdev->acl_last_tx = jiffies;
2163
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002164 hdev->acl_cnt -= count;
2165 quote -= count;
2166
2167 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 }
2169 }
2170}
2171
2172/* Schedule SCO */
2173static inline void hci_sched_sco(struct hci_dev *hdev)
2174{
2175 struct hci_conn *conn;
2176 struct sk_buff *skb;
2177 int quote;
2178
2179 BT_DBG("%s", hdev->name);
2180
2181 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2182 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2183 BT_DBG("skb %p len %d", skb, skb->len);
2184 hci_send_frame(skb);
2185
2186 conn->sent++;
2187 if (conn->sent == ~0)
2188 conn->sent = 0;
2189 }
2190 }
2191}
2192
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002193static inline void hci_sched_esco(struct hci_dev *hdev)
2194{
2195 struct hci_conn *conn;
2196 struct sk_buff *skb;
2197 int quote;
2198
2199 BT_DBG("%s", hdev->name);
2200
2201 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2202 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2203 BT_DBG("skb %p len %d", skb, skb->len);
2204 hci_send_frame(skb);
2205
2206 conn->sent++;
2207 if (conn->sent == ~0)
2208 conn->sent = 0;
2209 }
2210 }
2211}
2212
/* Schedule queued LE data.
 *
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) share
 * the ACL credits; the remaining count is written back to le_cnt or
 * acl_cnt accordingly.  Stalled LE links are killed after a 45 s TX
 * timeout (must exceed the 40.9 s link supervision maximum).
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
			time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Return unused credits to whichever pool they came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2246
/* TX tasklet: drain every scheduling queue to the driver.
 *
 * Runs under the hci_task_lock read lock, excluding operations that
 * take it for write while queues are serviced.  ACL, SCO, eSCO and LE
 * queues are scheduled in that order; raw (unknown type) packets are
 * flushed last, bypassing flow control.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2273
/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
2276/* ACL data packet */
/* ACL data packet: strip the ACL header, look up the owning
 * connection and hand the frame (with its packet-boundary flags) to
 * the registered L2CAP protocol.  Frames for unknown handles, or
 * received while no L2CAP handler is registered, are dropped.  On
 * success ownership of the skb passes to recv_acldata().
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit header field packs handle and PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2315
2316/* SCO data packet */
2317static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2318{
2319 struct hci_sco_hdr *hdr = (void *) skb->data;
2320 struct hci_conn *conn;
2321 __u16 handle;
2322
2323 skb_pull(skb, HCI_SCO_HDR_SIZE);
2324
2325 handle = __le16_to_cpu(hdr->handle);
2326
2327 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2328
2329 hdev->stat.sco_rx++;
2330
2331 hci_dev_lock(hdev);
2332 conn = hci_conn_hash_lookup_handle(hdev, handle);
2333 hci_dev_unlock(hdev);
2334
2335 if (conn) {
2336 register struct hci_proto *hp;
2337
2338 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002339 hp = hci_proto[HCI_PROTO_SCO];
2340 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 hp->recv_scodata(conn, skb);
2342 return;
2343 }
2344 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002345 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 hdev->name, handle);
2347 }
2348
2349 kfree_skb(skb);
2350}
2351
/* RX tasklet: process packets queued by the driver.
 *
 * Monitoring (promisc) sockets get a copy of every frame first.  In
 * HCI_RAW mode frames are consumed without stack processing; while
 * HCI_INIT is set, ACL/SCO data packets are discarded so only events
 * reach the init state machine.  Runs under the hci_task_lock read
 * lock.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2406
/* CMD tasklet: transmit the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).
 *
 * A clone is stored in hdev->sent_cmd so the completion handler can
 * inspect the parameters (hci_sent_cmd_data), and cmd_timer is armed
 * with the command timeout.  If cloning fails the command is put back
 * at the head of the queue and the tasklet rescheduled.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command, if any. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002434
/* Module parameter: runtime toggle for SMP support (LE only),
 * writable via sysfs (mode 0644). */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");