blob: da8b2dc73144f5c80a5895e99d80767d6d023885 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Brian Gix3cd62042012-01-11 15:18:17 -08003 Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Steve Mucklef132c6c2012-06-06 18:30:57 -070063static bool enable_smp = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Linus Torvalds1da177e2005-04-16 15:20:36 -070065/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073/* AMP Manager event callbacks */
74LIST_HEAD(amp_mgr_cb_list);
75DEFINE_RWLOCK(amp_mgr_cb_list_lock);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077/* HCI protocols */
78#define HCI_MAX_PROTO 2
79struct hci_proto *hci_proto[HCI_MAX_PROTO];
80
81/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080082static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Brian Gix6e4531c2011-10-28 16:12:08 -0700196 memset(&hdev->features, 0, sizeof(hdev->features));
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198}
199
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200200static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
201{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200202 struct hci_cp_delete_stored_link_key cp;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200203 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800204 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200205 __u8 flt_type;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214 skb->dev = (void *) hdev;
215
216 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100217 tasklet_schedule(&hdev->cmd_task);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200218 }
219 skb_queue_purge(&hdev->driver_init);
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 /* Mandatory initialization */
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200222
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300224 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
225 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200227 }
228
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 /* Set default HCI Flow Control Mode */
234 if (hdev->dev_type == HCI_BREDR)
235 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
236 else
237 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
238
239 /* Read HCI Flow Control Mode */
240 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
241
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
246 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248#if 0
249 /* Host buffer size */
250 {
251 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700254 cp.acl_max_pkt = cpu_to_le16(0xffff);
255 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200256 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258#endif
259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 if (hdev->dev_type == HCI_BREDR) {
261 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 /* Read Local Supported Features */
264 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 /* Read BD Address */
267 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 /* Read Class of Device */
270 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 /* Read Local Name */
273 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275 /* Read Voice Setting */
276 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 /* Optional initialization */
279 /* Clear Event Filters */
280 flt_type = HCI_FLT_CLEAR_ALL;
281 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 /* Connection accept timeout ~20 secs */
284 param = cpu_to_le16(0x7d00);
285 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
286
287 bacpy(&cp.bdaddr, BDADDR_ANY);
288 cp.delete_all = 1;
289 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
290 sizeof(cp), &cp);
291 } else {
292 /* AMP initialization */
293 /* Connection accept timeout ~5 secs */
294 param = cpu_to_le16(0x1f40);
295 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
296
297 /* Read AMP Info */
298 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 }
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200300}
301
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300302static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
303{
304 BT_DBG("%s", hdev->name);
305
306 /* Read LE buffer size */
307 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
308}
309
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __u8 scan = opt;
313
314 BT_DBG("%s %x", hdev->name, scan);
315
316 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200317 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318}
319
320static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
321{
322 __u8 auth = opt;
323
324 BT_DBG("%s %x", hdev->name, auth);
325
326 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200327 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328}
329
330static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
331{
332 __u8 encrypt = opt;
333
334 BT_DBG("%s %x", hdev->name, encrypt);
335
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200336 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200337 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338}
339
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200340static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
341{
342 __le16 policy = cpu_to_le16(opt);
343
Marcel Holtmanna418b892008-11-30 12:17:28 +0100344 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200345
346 /* Default link policy */
347 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
348}
349
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900350/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 * Device is held on return. */
352struct hci_dev *hci_dev_get(int index)
353{
354 struct hci_dev *hdev = NULL;
355 struct list_head *p;
356
357 BT_DBG("%d", index);
358
359 if (index < 0)
360 return NULL;
361
362 read_lock(&hci_dev_list_lock);
363 list_for_each(p, &hci_dev_list) {
364 struct hci_dev *d = list_entry(p, struct hci_dev, list);
365 if (d->id == index) {
366 hdev = hci_dev_hold(d);
367 break;
368 }
369 }
370 read_unlock(&hci_dev_list_lock);
371 return hdev;
372}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700373EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374
375/* ---- Inquiry support ---- */
376static void inquiry_cache_flush(struct hci_dev *hdev)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *next = cache->list, *e;
380
381 BT_DBG("cache %p", cache);
382
383 cache->list = NULL;
384 while ((e = next)) {
385 next = e->next;
386 kfree(e);
387 }
388}
389
390struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
391{
392 struct inquiry_cache *cache = &hdev->inq_cache;
393 struct inquiry_entry *e;
394
395 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
396
397 for (e = cache->list; e; e = e->next)
398 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200399 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400 return e;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200401}
402
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
404{
405 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200406 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
408 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
409
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200410 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
411 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200413 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
414 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 return;
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200416
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200417 ie->next = cache->list;
418 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 }
420
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200421 memcpy(&ie->data, data, sizeof(*data));
422 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 cache->timestamp = jiffies;
424}
425
426static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
427{
428 struct inquiry_cache *cache = &hdev->inq_cache;
429 struct inquiry_info *info = (struct inquiry_info *) buf;
430 struct inquiry_entry *e;
431 int copied = 0;
432
433 for (e = cache->list; e && copied < num; e = e->next, copied++) {
434 struct inquiry_data *data = &e->data;
435 bacpy(&info->bdaddr, &data->bdaddr);
436 info->pscan_rep_mode = data->pscan_rep_mode;
437 info->pscan_period_mode = data->pscan_period_mode;
438 info->pscan_mode = data->pscan_mode;
439 memcpy(info->dev_class, data->dev_class, 3);
440 info->clock_offset = data->clock_offset;
441 info++;
442 }
443
444 BT_DBG("cache %p, copied %d", cache, copied);
445 return copied;
446}
447
448static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
449{
450 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
451 struct hci_cp_inquiry cp;
452
453 BT_DBG("%s", hdev->name);
454
455 if (test_bit(HCI_INQUIRY, &hdev->flags))
456 return;
457
458 /* Start Inquiry */
459 memcpy(&cp.lap, &ir->lap, 3);
460 cp.length = ir->length;
461 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200462 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463}
464
465int hci_inquiry(void __user *arg)
466{
467 __u8 __user *ptr = arg;
468 struct hci_inquiry_req ir;
469 struct hci_dev *hdev;
470 int err = 0, do_inquiry = 0, max_rsp;
471 long timeo;
472 __u8 *buf;
473
474 if (copy_from_user(&ir, ptr, sizeof(ir)))
475 return -EFAULT;
476
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200477 hdev = hci_dev_get(ir.dev_id);
478 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479 return -ENODEV;
480
481 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900482 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200483 inquiry_cache_empty(hdev) ||
484 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 inquiry_cache_flush(hdev);
486 do_inquiry = 1;
487 }
488 hci_dev_unlock_bh(hdev);
489
Marcel Holtmann04837f62006-07-03 10:02:33 +0200490 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200491
492 if (do_inquiry) {
493 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
494 if (err < 0)
495 goto done;
496 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 /* for unlimited number of responses we will use buffer with 255 entries */
499 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
500
501 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
502 * copy it to the user space.
503 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100504 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200505 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 err = -ENOMEM;
507 goto done;
508 }
509
510 hci_dev_lock_bh(hdev);
511 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
512 hci_dev_unlock_bh(hdev);
513
514 BT_DBG("num_rsp %d", ir.num_rsp);
515
516 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
517 ptr += sizeof(ir);
518 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
519 ir.num_rsp))
520 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900521 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 err = -EFAULT;
523
524 kfree(buf);
525
526done:
527 hci_dev_put(hdev);
528 return err;
529}
530
531/* ---- HCI ioctl helpers ---- */
532
533int hci_dev_open(__u16 dev)
534{
535 struct hci_dev *hdev;
536 int ret = 0;
537
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200538 hdev = hci_dev_get(dev);
539 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540 return -ENODEV;
541
542 BT_DBG("%s %p", hdev->name, hdev);
543
544 hci_req_lock(hdev);
545
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200546 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
547 ret = -ERFKILL;
548 goto done;
549 }
550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 if (test_bit(HCI_UP, &hdev->flags)) {
552 ret = -EALREADY;
553 goto done;
554 }
555
556 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
557 set_bit(HCI_RAW, &hdev->flags);
558
559 if (hdev->open(hdev)) {
560 ret = -EIO;
561 goto done;
562 }
563
Bhasker Netia6e6a4f2012-01-27 15:25:43 +0530564 if (!skb_queue_empty(&hdev->cmd_q)) {
565 BT_ERR("command queue is not empty, purging");
566 skb_queue_purge(&hdev->cmd_q);
567 }
568 if (!skb_queue_empty(&hdev->rx_q)) {
569 BT_ERR("rx queue is not empty, purging");
570 skb_queue_purge(&hdev->rx_q);
571 }
572 if (!skb_queue_empty(&hdev->raw_q)) {
573 BT_ERR("raw queue is not empty, purging");
574 skb_queue_purge(&hdev->raw_q);
575 }
576
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577 if (!test_bit(HCI_RAW, &hdev->flags)) {
578 atomic_set(&hdev->cmd_cnt, 1);
579 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200580 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581
Marcel Holtmann04837f62006-07-03 10:02:33 +0200582 ret = __hci_request(hdev, hci_init_req, 0,
583 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700585 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300586 ret = __hci_request(hdev, hci_le_init_req, 0,
587 msecs_to_jiffies(HCI_INIT_TIMEOUT));
588
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 clear_bit(HCI_INIT, &hdev->flags);
590 }
591
592 if (!ret) {
593 hci_dev_hold(hdev);
594 set_bit(HCI_UP, &hdev->flags);
595 hci_notify(hdev, HCI_DEV_UP);
Peter Krystad1fc44072011-08-30 15:38:12 -0700596 if (!test_bit(HCI_SETUP, &hdev->flags) &&
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800597 hdev->dev_type == HCI_BREDR) {
598 hci_dev_lock_bh(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200599 mgmt_powered(hdev->id, 1);
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800600 hci_dev_unlock_bh(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200601 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900602 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700603 /* Init failed, cleanup */
604 tasklet_kill(&hdev->rx_task);
605 tasklet_kill(&hdev->tx_task);
606 tasklet_kill(&hdev->cmd_task);
607
608 skb_queue_purge(&hdev->cmd_q);
609 skb_queue_purge(&hdev->rx_q);
610
611 if (hdev->flush)
612 hdev->flush(hdev);
613
614 if (hdev->sent_cmd) {
615 kfree_skb(hdev->sent_cmd);
616 hdev->sent_cmd = NULL;
617 }
618
619 hdev->close(hdev);
620 hdev->flags = 0;
621 }
622
623done:
624 hci_req_unlock(hdev);
625 hci_dev_put(hdev);
626 return ret;
627}
628
Mat Martineau3b9239a2012-02-16 11:54:30 -0800629static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630{
Mat Martineau4106b992011-11-18 15:26:21 -0800631 unsigned long keepflags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700632
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633 BT_DBG("%s %p", hdev->name, hdev);
Andre Guedes28b75a82012-02-03 17:48:00 -0300634
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 hci_req_cancel(hdev, ENODEV);
636 hci_req_lock(hdev);
637
638 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300639 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 hci_req_unlock(hdev);
641 return 0;
642 }
643
644 /* Kill RX and TX tasks */
645 tasklet_kill(&hdev->rx_task);
646 tasklet_kill(&hdev->tx_task);
647
648 hci_dev_lock_bh(hdev);
649 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800650 hci_conn_hash_flush(hdev, is_process);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 hci_dev_unlock_bh(hdev);
652
653 hci_notify(hdev, HCI_DEV_DOWN);
654
Bhasker Netiffdff572011-12-21 17:24:01 -0800655 if (hdev->dev_type == HCI_BREDR) {
656 hci_dev_lock_bh(hdev);
657 mgmt_powered(hdev->id, 0);
658 hci_dev_unlock_bh(hdev);
659 }
660
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661 if (hdev->flush)
662 hdev->flush(hdev);
663
664 /* Reset device */
665 skb_queue_purge(&hdev->cmd_q);
666 atomic_set(&hdev->cmd_cnt, 1);
667 if (!test_bit(HCI_RAW, &hdev->flags)) {
668 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200669 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200670 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 clear_bit(HCI_INIT, &hdev->flags);
672 }
673
674 /* Kill cmd task */
675 tasklet_kill(&hdev->cmd_task);
676
677 /* Drop queues */
678 skb_queue_purge(&hdev->rx_q);
679 skb_queue_purge(&hdev->cmd_q);
680 skb_queue_purge(&hdev->raw_q);
681
682 /* Drop last sent command */
683 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300684 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 kfree_skb(hdev->sent_cmd);
686 hdev->sent_cmd = NULL;
687 }
688
689 /* After this point our queues are empty
690 * and no tasks are scheduled. */
691 hdev->close(hdev);
692
Mat Martineau4106b992011-11-18 15:26:21 -0800693 /* Clear only non-persistent flags */
694 if (test_bit(HCI_MGMT, &hdev->flags))
695 set_bit(HCI_MGMT, &keepflags);
696 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
697 set_bit(HCI_LINK_KEYS, &keepflags);
698 if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
699 set_bit(HCI_DEBUG_KEYS, &keepflags);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200700
Mat Martineau4106b992011-11-18 15:26:21 -0800701 hdev->flags = keepflags;
Johan Hedberge59fda82012-02-22 18:11:53 +0200702
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 hci_req_unlock(hdev);
704
705 hci_dev_put(hdev);
706 return 0;
707}
708
709int hci_dev_close(__u16 dev)
710{
711 struct hci_dev *hdev;
712 int err;
713
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200714 hdev = hci_dev_get(dev);
715 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716 return -ENODEV;
Mat Martineau3b9239a2012-02-16 11:54:30 -0800717 err = hci_dev_do_close(hdev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 hci_dev_put(hdev);
719 return err;
720}
721
722int hci_dev_reset(__u16 dev)
723{
724 struct hci_dev *hdev;
725 int ret = 0;
726
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200727 hdev = hci_dev_get(dev);
728 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 return -ENODEV;
730
731 hci_req_lock(hdev);
732 tasklet_disable(&hdev->tx_task);
733
734 if (!test_bit(HCI_UP, &hdev->flags))
735 goto done;
736
737 /* Drop queues */
738 skb_queue_purge(&hdev->rx_q);
739 skb_queue_purge(&hdev->cmd_q);
740
741 hci_dev_lock_bh(hdev);
742 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800743 hci_conn_hash_flush(hdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 hci_dev_unlock_bh(hdev);
745
746 if (hdev->flush)
747 hdev->flush(hdev);
748
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900749 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300750 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751
752 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200753 ret = __hci_request(hdev, hci_reset_req, 0,
754 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755
756done:
757 tasklet_enable(&hdev->tx_task);
758 hci_req_unlock(hdev);
759 hci_dev_put(hdev);
760 return ret;
761}
762
763int hci_dev_reset_stat(__u16 dev)
764{
765 struct hci_dev *hdev;
766 int ret = 0;
767
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200768 hdev = hci_dev_get(dev);
769 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 return -ENODEV;
771
772 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
773
774 hci_dev_put(hdev);
775
776 return ret;
777}
778
779int hci_dev_cmd(unsigned int cmd, void __user *arg)
780{
781 struct hci_dev *hdev;
782 struct hci_dev_req dr;
783 int err = 0;
784
785 if (copy_from_user(&dr, arg, sizeof(dr)))
786 return -EFAULT;
787
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200788 hdev = hci_dev_get(dr.dev_id);
789 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 return -ENODEV;
791
792 switch (cmd) {
793 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200794 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
795 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 break;
797
798 case HCISETENCRYPT:
799 if (!lmp_encrypt_capable(hdev)) {
800 err = -EOPNOTSUPP;
801 break;
802 }
803
804 if (!test_bit(HCI_AUTH, &hdev->flags)) {
805 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200806 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
807 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 if (err)
809 break;
810 }
811
Marcel Holtmann04837f62006-07-03 10:02:33 +0200812 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
813 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 break;
815
816 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200817 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
818 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 break;
820
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200821 case HCISETLINKPOL:
822 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
823 msecs_to_jiffies(HCI_INIT_TIMEOUT));
824 break;
825
826 case HCISETLINKMODE:
827 hdev->link_mode = ((__u16) dr.dev_opt) &
828 (HCI_LM_MASTER | HCI_LM_ACCEPT);
829 break;
830
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 case HCISETPTYPE:
832 hdev->pkt_type = (__u16) dr.dev_opt;
833 break;
834
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200836 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
837 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 break;
839
840 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200841 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
842 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 break;
844
845 default:
846 err = -EINVAL;
847 break;
848 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200849
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850 hci_dev_put(hdev);
851 return err;
852}
853
/* HCIGETDEVLIST ioctl helper: copy the list of registered HCI devices
 * (id + flags per device) to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req whose dev_num
 *       field holds the capacity of the caller's array.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	/* Only dev_num is read up front; it bounds the kernel allocation. */
	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so 'size' below cannot overflow / bloat. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Userspace is enumerating devices: cancel any pending
		 * auto-power-off so the device stays up. */
		hci_del_off_timer(hdev);

		/* Devices not managed by the mgmt interface default to
		 * pairable (legacy ioctl users expect that). */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Report and copy only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
903
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for
 * one device and copy it to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_info; dev_id selects the
 *       device, the rest is output.
 *
 * Returns 0 on success, -EFAULT or -ENODEV on failure.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference; released via hci_dev_put() below. */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto-power-off. */
	hci_del_off_timer(hdev);

	/* Non-mgmt devices default to pairable for legacy ioctl users. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus in the low nibble, device type in the high nibble. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
944
945/* ---- Interface to HCI drivers ---- */
946
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200947static int hci_rfkill_set_block(void *data, bool blocked)
948{
949 struct hci_dev *hdev = data;
950
951 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
952
953 if (!blocked)
954 return 0;
955
Mat Martineau3b9239a2012-02-16 11:54:30 -0800956 hci_dev_do_close(hdev, 0);
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200957
958 return 0;
959}
960
/* rfkill operations: only block/unblock is handled (see
 * hci_rfkill_set_block above). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
964
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965/* Alloc HCI device */
966struct hci_dev *hci_alloc_dev(void)
967{
968 struct hci_dev *hdev;
969
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200970 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 if (!hdev)
972 return NULL;
973
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 skb_queue_head_init(&hdev->driver_init);
975
976 return hdev;
977}
978EXPORT_SYMBOL(hci_alloc_dev);
979
/* Free HCI device: drop any queued driver-init skbs and release the
 * final device reference. The struct itself is freed by the driver
 * core through the device release callback, not here. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
989
/* Workqueue handler: power on a freshly registered device.
 * Opens the device, arms the auto-power-off timer while HCI_AUTO_OFF
 * is set, and announces the new index to mgmt once setup completes.
 * BR/EDR-only checks are deliberate: AMP/LE-only controllers are not
 * announced through mgmt in this tree. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	/* -EALREADY just means the device is already up; keep going. */
	err = hci_dev_open(hdev->id);
	if (err && err != -EALREADY)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First power-on after registration: leave setup phase and tell
	 * the management interface about the new controller. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
				hdev->dev_type == HCI_BREDR)
		mgmt_index_added(hdev->id);
}
1010
/* Workqueue handler: power the device off (queued by the auto-off
 * timer, see hci_auto_off). Runs in process context so it may sleep. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
1019
/* off_timer callback (softirq context): the auto-off grace period
 * expired without userspace claiming the device. Clear HCI_AUTO_OFF
 * first, then defer the actual (sleeping) close to the workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	/* hci_dev_close() may sleep; cannot be called from timer context. */
	queue_work(hdev->workqueue, &hdev->power_off);
}
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001030
/* Cancel the pending auto-power-off: clear the flag before deleting the
 * timer so a concurrently firing hci_auto_off() sees HCI_AUTO_OFF
 * already cleared. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
1038
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001039int hci_uuids_clear(struct hci_dev *hdev)
1040{
1041 struct list_head *p, *n;
1042
1043 list_for_each_safe(p, n, &hdev->uuids) {
1044 struct bt_uuid *uuid;
1045
1046 uuid = list_entry(p, struct bt_uuid, list);
1047
1048 list_del(p);
1049 kfree(uuid);
1050 }
1051
1052 return 0;
1053}
1054
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001055int hci_link_keys_clear(struct hci_dev *hdev)
1056{
1057 struct list_head *p, *n;
1058
1059 list_for_each_safe(p, n, &hdev->link_keys) {
1060 struct link_key *key;
1061
1062 key = list_entry(p, struct link_key, list);
1063
1064 list_del(p);
1065 kfree(key);
1066 }
1067
1068 return 0;
1069}
1070
1071struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1072{
1073 struct list_head *p;
1074
1075 list_for_each(p, &hdev->link_keys) {
1076 struct link_key *k;
1077
1078 k = list_entry(p, struct link_key, list);
1079
1080 if (bacmp(bdaddr, &k->bdaddr) == 0)
1081 return k;
1082 }
1083
1084 return NULL;
1085}
1086
/* Look up an LE Long Term Key by its EDiv/Rand pair (received in the
 * LE LTK request event). Only entries of KEY_TYPE_LTK whose trailing
 * data holds a struct key_master_id are considered. Returns the entry
 * or NULL. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;
		struct key_master_id *id;

		k = list_entry(p, struct link_key, list);

		if (k->key_type != KEY_TYPE_LTK)
			continue;

		/* The master id lives in the flexible data[] tail; skip
		 * entries whose tail has a different layout. */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1112
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001113struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1114 bdaddr_t *bdaddr, u8 type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001115{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001116 struct list_head *p;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001117
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001118 list_for_each(p, &hdev->link_keys) {
1119 struct link_key *k;
1120
1121 k = list_entry(p, struct link_key, list);
1122
Brian Gixcf956772011-10-20 15:18:51 -07001123 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001124 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001125 }
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001126
1127 return NULL;
1128}
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001129EXPORT_SYMBOL(hci_find_link_key_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001130
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @new_key: nonzero if this key was just created by the controller (as
 *           opposed to being reloaded), in which case mgmt is notified.
 * @val:     16-byte key value.
 * @type:    HCI link key type (0x06 = changed combination key).
 * @pin_len: PIN length used when the key was generated.
 *
 * Returns 0 on success or -ENOMEM. Allocates with GFP_ATOMIC since it
 * can be called from the event-processing (non-sleeping) path.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	struct hci_conn *conn;
	u8 old_key_type;
	u8 bonded = 0;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->key_type;
		key = old_key;
	} else {
		old_key_type = 0xff;	/* sentinel: no previous key */
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->auth = 0x01;
	key->key_type = type;
	key->pin_len = pin_len;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
	/* Store the link key persistently if one of the following is true:
	 * 1. the remote side is using dedicated bonding since in that case
	 *    also the local requirements are set to dedicated bonding
	 * 2. the local side had dedicated bonding as a requirement
	 * 3. this is a legacy link key
	 * 4. this is a changed combination key and there was a previously
	 *    stored one
	 * If none of the above match only keep the link key around for
	 * this connection and set the temporary flag for the device.
	 */

	if (conn) {
		if ((conn->remote_auth > 0x01) ||
			(conn->auth_initiator && conn->auth_type > 0x01) ||
			(key->key_type < 0x03) ||
			(key->key_type == 0x06 && old_key_type != 0xff))
			bonded = 1;
	}

	if (new_key)
		mgmt_new_key(hdev->id, key, bonded);

	/* A changed combination key (0x06) keeps the previous stored type;
	 * with no previous key this leaves the 0xff sentinel in place. */
	if (type == 0x06)
		key->key_type = old_key_type;

	return 0;
}
1187
/* Store (or update) an LE Long Term Key for @bdaddr.
 *
 * @new_key:  nonzero if the key was just distributed (notify mgmt).
 * @addr_type: LE address type of the peer.
 * @key_size: negotiated encryption key size (stored in pin_len).
 * @auth:     authentication requirements; bit 0 = authenticated (MITM).
 * @ediv/@rand: master identification used to look the key up later
 *              (see hci_find_ltk).
 * @ltk:      16-byte key value.
 *
 * Returns 0 on success or -ENOMEM. The key_master_id is stored in the
 * link_key's flexible data[] tail, so new entries are allocated with
 * room for it.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 addr_type, u8 key_size, u8 auth,
				__le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;

	BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
						batostr(bdaddr), addr_type);

	/* LTK entries are keyed by address; reuse an existing one. Only
	 * KEY_TYPE_LTK entries carry the key_master_id tail, so reusing
	 * a matched entry is safe. */
	old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
	if (old_key) {
		key = old_key;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	key->addr_type = addr_type;
	memcpy(key->val, ltk, sizeof(key->val));
	key->key_type = KEY_TYPE_LTK;
	key->pin_len = key_size;
	key->auth = auth;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* bit 0 of auth doubles as the "authenticated key" flag for mgmt */
	if (new_key)
		mgmt_new_key(hdev->id, key, auth & 0x01);

	return 0;
}
1226
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001227int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1228{
1229 struct link_key *key;
1230
1231 key = hci_find_link_key(hdev, bdaddr);
1232 if (!key)
1233 return -ENOENT;
1234
1235 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1236
1237 list_del(&key->list);
1238 kfree(key);
1239
1240 return 0;
1241}
1242
/* HCI command timer function: fires (softirq context) when the
 * controller failed to answer a command within the timeout. Restore
 * the command credit and kick the cmd tasklet so queued commands are
 * not stuck forever; also drop HCI_RESET so a pending reset does not
 * block further command submission. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1253
/* Look up cached remote out-of-band pairing data by address.
 * Returns the entry or NULL. NOTE(review): no locking here — assumed
 * to be serialized by the caller (hdev lock); confirm at call sites. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1265
1266int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1267{
1268 struct oob_data *data;
1269
1270 data = hci_find_remote_oob_data(hdev, bdaddr);
1271 if (!data)
1272 return -ENOENT;
1273
1274 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1275
1276 list_del(&data->list);
1277 kfree(data);
1278
1279 return 0;
1280}
1281
1282int hci_remote_oob_data_clear(struct hci_dev *hdev)
1283{
1284 struct oob_data *data, *n;
1285
1286 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1287 list_del(&data->list);
1288 kfree(data);
1289 }
1290
1291 return 0;
1292}
1293
/* adv_timer callback: flush the LE advertising cache when it expires. */
static void hci_adv_clear(unsigned long arg)
{
	hci_adv_entries_clear((struct hci_dev *) arg);
}
1300
1301int hci_adv_entries_clear(struct hci_dev *hdev)
1302{
1303 struct list_head *p, *n;
1304
Brian Gixa68668b2011-08-11 15:49:36 -07001305 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001306 write_lock_bh(&hdev->adv_entries_lock);
1307
1308 list_for_each_safe(p, n, &hdev->adv_entries) {
1309 struct adv_entry *entry;
1310
1311 entry = list_entry(p, struct adv_entry, list);
1312
1313 list_del(p);
1314 kfree(entry);
1315 }
1316
1317 write_unlock_bh(&hdev->adv_entries_lock);
1318
1319 return 0;
1320}
1321
1322struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1323{
1324 struct list_head *p;
1325 struct adv_entry *res = NULL;
1326
Brian Gixa68668b2011-08-11 15:49:36 -07001327 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001328 read_lock_bh(&hdev->adv_entries_lock);
1329
1330 list_for_each(p, &hdev->adv_entries) {
1331 struct adv_entry *entry;
1332
1333 entry = list_entry(p, struct adv_entry, list);
1334
1335 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1336 res = entry;
1337 goto out;
1338 }
1339 }
1340out:
1341 read_unlock_bh(&hdev->adv_entries_lock);
1342 return res;
1343}
1344
1345static inline int is_connectable_adv(u8 evt_type)
1346{
1347 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1348 return 1;
1349
1350 return 0;
1351}
1352
/* Cache (or refresh) remote out-of-band pairing data for @bdaddr.
 *
 * @hash:       peer's simple-pairing hash C.
 * @randomizer: peer's simple-pairing randomizer R.
 *
 * Returns 0 on success or -ENOMEM. GFP_ATOMIC because this may be
 * called from a non-sleeping context.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* Existing entries are simply overwritten with the new values. */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1376
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001377int hci_add_adv_entry(struct hci_dev *hdev,
1378 struct hci_ev_le_advertising_info *ev)
Andre Guedes76c86862011-05-26 16:23:50 -03001379{
1380 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001381 u8 flags = 0;
1382 int i;
Andre Guedes76c86862011-05-26 16:23:50 -03001383
Brian Gixa68668b2011-08-11 15:49:36 -07001384 BT_DBG("");
Andre Guedes76c86862011-05-26 16:23:50 -03001385
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001386 if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001387 return -EINVAL;
1388
Brian Gixfdd38922011-09-28 16:23:48 -07001389 if (ev->data && ev->length) {
1390 for (i = 0; (i + 2) < ev->length; i++)
1391 if (ev->data[i+1] == 0x01) {
1392 flags = ev->data[i+2];
1393 BT_DBG("flags: %2.2x", flags);
1394 break;
1395 } else {
1396 i += ev->data[i];
1397 }
1398 }
1399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes76c86862011-05-26 16:23:50 -03001401 /* Only new entries should be added to adv_entries. So, if
1402 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001403 if (entry) {
1404 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001405 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001406 }
Andre Guedes76c86862011-05-26 16:23:50 -03001407
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001408 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
Andre Guedes76c86862011-05-26 16:23:50 -03001409 if (!entry)
1410 return -ENOMEM;
1411
1412 bacpy(&entry->bdaddr, &ev->bdaddr);
1413 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001414 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001415
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001416 write_lock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001417 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 write_unlock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001419
1420 return 0;
1421}
1422
/* Allocate the AES-ECB block cipher used by SMP. When SMP support is
 * disabled via the enable_smp module parameter, return -ENOTSUPP as an
 * ERR_PTR so callers can distinguish "disabled" from a real failure. */
static struct crypto_blkcipher *alloc_cypher(void)
{
	if (enable_smp)
		return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);

	return ERR_PTR(-ENOTSUPP);
}
1430
/* Register HCI device */
/* Adds @hdev to the global device list, initializes all of its state
 * (tasklets, queues, timers, lists, locks), creates its workqueue and
 * sysfs/rfkill hooks, and schedules the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success, -EINVAL if the
 * driver did not provide mandatory callbacks, or -ENOMEM if the
 * workqueue could not be created.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* These driver callbacks are mandatory. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* BR/EDR controllers start numbering at 0, others (e.g. AMP) at 1. */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	/* The list is kept sorted by id; stop at the first gap. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
	setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
						(unsigned long) hdev);
	setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
						(unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);
	hci_chan_list_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	rwlock_init(&hdev->adv_entries_lock);
	setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Sleeping allocations must happen outside hci_dev_list_lock. */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* SMP cipher failure is non-fatal: LE pairing will be degraded. */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill is optional; registration failure just disables it. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Start in setup phase with auto-off armed; hci_power_on (queued
	 * below) opens the device and announces it to mgmt. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1554
/* Unregister HCI device */
/* Tears down @hdev in the reverse order of registration: unlink from
 * the global list, close the device, notify mgmt, release rfkill and
 * sysfs, stop all timers, destroy the workqueue, flush all cached
 * state, and drop the registration reference. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the list first so no new lookups can find us. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	/* Second argument keeps the device data for SMD-bus controllers. */
	hci_dev_do_close(hdev, hdev->bus == HCI_SMD);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt the index is gone, unless the device never finished
	 * setup or is not a BR/EDR controller. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
		!test_bit(HCI_SETUP, &hdev->flags) &&
		hdev->dev_type == HCI_BREDR) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev->id);
		hci_dev_unlock_bh(hdev);
	}

	/* tfm is either a valid cipher or an ERR_PTR from alloc_cypher(). */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Disable all timers */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);
	del_timer(&hdev->cmd_timer);
	del_timer(&hdev->disco_timer);
	del_timer(&hdev->disco_le_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1613
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered HCI notifiers on behalf of a
 * driver entering suspend. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1621
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered HCI notifiers on behalf of a
 * driver resuming from suspend. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1629
/* Receive frame from HCI drivers */
/* Entry point for transport drivers delivering a complete HCI packet.
 * The skb's dev field must point at the owning hci_dev. Frames are
 * dropped with -ENXIO unless the device is up or still initializing;
 * otherwise the frame is timestamped, queued on rx_q and handed to the
 * rx tasklet. Returns 0 on success. Takes ownership of @skb. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1653
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * @data/@count is the next chunk of raw bytes; @index selects which
 * per-device reassembly slot (hdev->reassembly[]) carries the partial
 * packet between calls.  A complete packet is handed to hci_recv_frame()
 * and its slot cleared.
 *
 * Returns the number of unconsumed bytes left in @data (>= 0), or a
 * negative errno (-EILSEQ for a bad type/index, -ENOMEM on allocation
 * or oversized-payload failure).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest packet of this type.  The type range was already
		 * validated above, so one switch arm always matches. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect counts bytes still needed; initially the
		 * type-specific header length. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than is still expected for this packet */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed header is complete, read the payload
		 * length from it and extend the expectation. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					/* Advertised payload would overflow
					 * the buffer: drop the packet. */
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it up and free the slot.
			 * Ownership of skb passes to hci_recv_frame(). */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1762
Marcel Holtmannef222012007-07-11 06:42:04 +02001763int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1764{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301765 int rem = 0;
1766
Marcel Holtmannef222012007-07-11 06:42:04 +02001767 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1768 return -EILSEQ;
1769
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001770 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001771 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301772 if (rem < 0)
1773 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001774
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301775 data += (count - rem);
1776 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001777 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001778
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301779 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001780}
1781EXPORT_SYMBOL(hci_recv_fragment);
1782
Suraj Sumangala99811512010-07-14 13:02:19 +05301783#define STREAM_REASSEMBLY 0
1784
1785int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1786{
1787 int type;
1788 int rem = 0;
1789
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001790 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301791 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1792
1793 if (!skb) {
1794 struct { char type; } *pkt;
1795
1796 /* Start of the frame */
1797 pkt = data;
1798 type = pkt->type;
1799
1800 data++;
1801 count--;
1802 } else
1803 type = bt_cb(skb)->pkt_type;
1804
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001805 rem = hci_reassembly(hdev, type, data, count,
1806 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301807 if (rem < 0)
1808 return rem;
1809
1810 data += (count - rem);
1811 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001812 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301813
1814 return rem;
1815}
1816EXPORT_SYMBOL(hci_recv_stream_fragment);
1817
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818/* ---- Interface to upper protocols ---- */
1819
1820/* Register/Unregister protocols.
1821 * hci_task_lock is used to ensure that no tasks are running. */
1822int hci_register_proto(struct hci_proto *hp)
1823{
1824 int err = 0;
1825
1826 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1827
1828 if (hp->id >= HCI_MAX_PROTO)
1829 return -EINVAL;
1830
1831 write_lock_bh(&hci_task_lock);
1832
1833 if (!hci_proto[hp->id])
1834 hci_proto[hp->id] = hp;
1835 else
1836 err = -EEXIST;
1837
1838 write_unlock_bh(&hci_task_lock);
1839
1840 return err;
1841}
1842EXPORT_SYMBOL(hci_register_proto);
1843
1844int hci_unregister_proto(struct hci_proto *hp)
1845{
1846 int err = 0;
1847
1848 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1849
1850 if (hp->id >= HCI_MAX_PROTO)
1851 return -EINVAL;
1852
1853 write_lock_bh(&hci_task_lock);
1854
1855 if (hci_proto[hp->id])
1856 hci_proto[hp->id] = NULL;
1857 else
1858 err = -ENOENT;
1859
1860 write_unlock_bh(&hci_task_lock);
1861
1862 return err;
1863}
1864EXPORT_SYMBOL(hci_unregister_proto);
1865
/* Add @cb to the global hci_cb_list under its writer lock.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1877
/* Remove @cb from the global hci_cb_list under its writer lock.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1889
/* Add an AMP manager callback set to amp_mgr_cb_list under its
 * writer lock.  Always returns 0. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1901
/* Remove an AMP manager callback set from amp_mgr_cb_list under its
 * writer lock.  Always returns 0. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1913
/* Fan a Command Complete event out to every registered AMP manager
 * that installed an amp_cmd_complete_event handler.  The skb is only
 * borrowed here; callees must not assume ownership. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
				struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1928
/* Fan a Command Status event (@opcode, @status) out to every registered
 * AMP manager that installed an amp_cmd_status_event handler. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1942
/* Fan a raw HCI event (@ev_code) out to every registered AMP manager
 * that installed an amp_event handler.  The skb is only borrowed. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
				struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1957
/* Hand one fully built HCI packet to the transport driver's ->send().
 * Consumes @skb (the driver takes ownership; on missing hdev it is
 * freed here).  When sockets are in promiscuous mode a timestamped
 * copy is mirrored to them first. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1982
/* Send HCI command */
/* Build an HCI command packet (@opcode with @plen bytes of @param) and
 * queue it for the cmd tasklet.  Returns 0 or -ENOMEM.
 * NOTE(review): plen is stored into the single-byte header length field,
 * so values above 255 would be truncated — presumably all callers pass
 * small fixed-size parameter blocks; confirm against hci_command_hdr. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last command issued so the
	 * init sequence can be resumed/checked on completion. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
2020/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002021void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022{
2023 struct hci_command_hdr *hdr;
2024
2025 if (!hdev->sent_cmd)
2026 return NULL;
2027
2028 hdr = (void *) hdev->sent_cmd->data;
2029
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002030 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 return NULL;
2032
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002033 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
2035 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2036}
2037
/* Send ACL data */
/* Prepend an ACL header to @skb: packs @handle and @flags into the
 * 16-bit handle field and records the prior payload length as dlen
 * (both little-endian). */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2050
/* Queue an ACL data packet (possibly pre-fragmented via frag_list) on
 * @conn's data queue and kick the tx tasklet.  For BR/EDR the ACL
 * header carries conn->handle; otherwise (AMP) the logical-link handle
 * from @chan is used. */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * individual ACL packet below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Continuation fragments get ACL_CONT packet-boundary flags */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on the non-BREDR path above
			 * where the first fragment used chan->ll_handle —
			 * confirm this asymmetry is intentional. */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2102
/* Send SCO data */
/* Prepend a SCO header (handle + length) to @skb, queue it on @conn's
 * data queue and kick the tx tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2125
2126/* ---- HCI TX task (outgoing data) ---- */
2127
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (least-recently-served fairness), and compute its
 * per-round quota in *quote: the relevant controller buffer budget
 * divided evenly among eligible connections, minimum 1.
 * Returns NULL (and *quote = 0) when nothing is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only fully established (or configuring) links may send */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Budget depends on link type; LE borrows the ACL budget
		 * when the controller has no dedicated LE buffers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2183
/* TX watchdog: the controller stopped returning buffer credits for
 * links of @type.  Disconnect every connection of that type that still
 * has unacknowledged packets (reason 0x13: remote user terminated). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
2202
2203static inline void hci_sched_acl(struct hci_dev *hdev)
2204{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 struct hci_conn *conn;
2206 struct sk_buff *skb;
2207 int quote;
2208
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002209 BT_DBG("%s", hdev->name);
2210
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 if (!test_bit(HCI_RAW, &hdev->flags)) {
2212 /* ACL tx timeout must be longer than maximum
2213 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002214 if (hdev->acl_cnt <= 0 &&
2215 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002216 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002218
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002219 while (hdev->acl_cnt > 0 &&
2220 (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2221 while (quote > 0 && (skb = skb_dequeue(&conn->data_q))) {
2222 int count = 1;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002223
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002225
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002226 if (hdev->flow_ctl_mode ==
2227 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2228 /* Calculate count of blocks used by
2229 * this packet
2230 */
2231 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2232 hdev->data_block_len) + 1;
2233
2234 if (count > hdev->acl_cnt)
2235 return;
2236
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002237 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002238
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 hci_send_frame(skb);
2240 hdev->acl_last_tx = jiffies;
2241
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002242 hdev->acl_cnt -= count;
2243 quote -= count;
2244
2245 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002247 }
2248}
2249
/* Schedule SCO */
/* Drain queued SCO data within the sco_cnt buffer budget, round-robin
 * across connections via hci_low_sent().  conn->sent wraps to 0 at ~0
 * to keep the fairness counter bounded. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2270
/* Drain queued eSCO data; identical scheme to hci_sched_sco() but for
 * ESCO_LINK connections (both link types share hdev->sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2290
/* Drain queued LE data.  When the controller has no dedicated LE
 * buffers (le_pkts == 0), the ACL buffer budget is borrowed and the
 * remainder written back to acl_cnt afterwards.  Also runs the same
 * stalled-link watchdog as the ACL scheduler. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Return the unused budget to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2324
/* TX tasklet entry point (@arg is the hci_dev).  Runs each link-type
 * scheduler in priority order (ACL, SCO, eSCO, LE) and finally flushes
 * raw packets, all under the hci_task_lock reader lock. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2351
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002352/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
/* ACL data packet */
/* Deliver one inbound ACL packet: strip the header, resolve the
 * connection from the handle and hand the payload to the L2CAP layer.
 * Consumes @skb (freed here if no connection or no L2CAP receiver). */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both handle and packet-boundary flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2393
/* SCO data packet */
/* Deliver one inbound SCO packet: strip the header, resolve the
 * connection from the handle and hand the payload to the SCO layer.
 * Consumes @skb (freed here if no connection or no SCO receiver). */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2429
/* RX tasklet entry point (@arg is the hci_dev).  Drains the rx queue:
 * mirrors to promiscuous sockets, discards everything in RAW mode,
 * drops data packets while still in INIT, then dispatches by packet
 * type (event/ACL/SCO).  Runs under the hci_task_lock reader lock. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			/* Raw mode: sockets got their copy above; the
			 * stack itself processes nothing. */
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2484
/* Command tasklet entry point (@arg is the hci_dev).  Sends at most
 * one queued command per run when the controller has a free command
 * credit (cmd_cnt), keeping a clone in hdev->sent_cmd so the response
 * handler can inspect it, and arming the command timeout timer. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002512
/* Module parameter toggling Security Manager Protocol support for LE;
 * presumably the enable_smp variable is defined earlier in this file —
 * confirm against the full source. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");