blob: dcbe1d29bb8e6be3bd9fdcd0fa0698cc706a355d [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020057int enable_hs;
58
Linus Torvalds1da177e2005-04-16 15:20:36 -070059static void hci_cmd_task(unsigned long arg);
60static void hci_rx_task(unsigned long arg);
61static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
63static DEFINE_RWLOCK(hci_task_lock);
64
65/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
73/* HCI protocols */
74#define HCI_MAX_PROTO 2
75struct hci_proto *hci_proto[HCI_MAX_PROTO];
76
77/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080078static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80/* ---- HCI notifications ---- */
81
82int hci_register_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
87int hci_unregister_notifier(struct notifier_block *nb)
88{
Alan Sterne041c682006-03-27 01:16:30 -080089 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
/* Run every registered notifier callback for the given HCI event,
 * passing @hdev as the notifier data. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
96
97/* ---- HCI requests ---- */
98
/* Complete a pending synchronous HCI request.
 *
 * Called from the event path when command @cmd finishes with @result.
 * During the init phase only the last issued init command is allowed
 * to complete the request; completions for any other command are
 * ignored.  Wakes the waiter sleeping in __hci_request().
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115
/* Cancel a pending synchronous HCI request with error @err.
 * The waiter in __hci_request() sees HCI_REQ_CANCELED and returns
 * -@err (see the switch there).  No-op if no request is pending. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
126
/* Execute request and wait for completion.
 *
 * Marks the request pending, issues @req(hdev, opt) and sleeps
 * interruptibly for up to @timeout jiffies until hci_req_complete()
 * or hci_req_cancel() settles it.  Returns 0 on success, a negative
 * errno derived from the HCI status on failure, -EINTR on signal and
 * -ETIMEDOUT when no completion arrives.  Callers must hold the
 * per-device request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and set the task state BEFORE
	 * issuing the request, so a completion that arrives immediately
	 * cannot be missed by schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status as
	 * HCI_REQ_PEND rather than resetting it like the path below —
	 * presumably benign since each request re-initializes it, but
	 * worth confirming. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
170static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100171 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172{
173 int ret;
174
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200175 if (!test_bit(HCI_UP, &hdev->flags))
176 return -ENETDOWN;
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178 /* Serialize all requests */
179 hci_req_lock(hdev);
180 ret = __hci_request(hdev, req, opt, timeout);
181 hci_req_unlock(hdev);
182
183 return ret;
184}
185
/* Request callback: issue an HCI_Reset to the controller.
 * Sets HCI_RESET so the event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194
195static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
196{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200197 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800199 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200200 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
202 BT_DBG("%s %ld", hdev->name, opt);
203
204 /* Driver initialization */
205
206 /* Special commands */
207 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700208 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100210
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100212 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 }
214 skb_queue_purge(&hdev->driver_init);
215
216 /* Mandatory initialization */
217
218 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300219 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
220 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200221 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300222 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
224 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200227 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
233#if 0
234 /* Host buffer size */
235 {
236 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700237 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700238 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700239 cp.acl_max_pkt = cpu_to_le16(0xffff);
240 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200241 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 }
243#endif
244
245 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200246 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
247
248 /* Read Class of Device */
249 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
250
251 /* Read Local Name */
252 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
254 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200255 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 /* Optional initialization */
258
259 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200260 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200261 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700264 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200266
267 bacpy(&cp.bdaddr, BDADDR_ANY);
268 cp.delete_all = 1;
269 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700270}
271
/* Request callback: LE-specific initialization — query the LE
 * ACL buffer size from the controller. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
279
/* Request callback: write the scan-enable setting (inquiry/page scan
 * bits) taken from the low byte of @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
289
/* Request callback: write the authentication-enable setting taken
 * from the low byte of @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
299
/* Request callback: write the encryption-mode setting taken from the
 * low byte of @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
309
/* Request callback: write the default link policy, converting @opt
 * to little-endian wire format first. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
319
/* Get HCI device by index.
 * Device is held on return; the caller must balance with
 * hci_dev_put().  Returns NULL for a negative index or when no
 * device with that id is registered.  The device list is walked
 * under the read side of hci_dev_list_lock. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take a reference while still holding the lock */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341
342/* ---- Inquiry support ---- */
343static void inquiry_cache_flush(struct hci_dev *hdev)
344{
345 struct inquiry_cache *cache = &hdev->inq_cache;
346 struct inquiry_entry *next = cache->list, *e;
347
348 BT_DBG("cache %p", cache);
349
350 cache->list = NULL;
351 while ((e = next)) {
352 next = e->next;
353 kfree(e);
354 }
355}
356
357struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
358{
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *e;
361
362 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
363
364 for (e = cache->list; e; e = e->next)
365 if (!bacmp(&e->data.bdaddr, bdaddr))
366 break;
367 return e;
368}
369
/* Insert or refresh the inquiry cache entry for @data->bdaddr.
 * Allocates a new head entry (GFP_ATOMIC — may be called from
 * non-sleeping context) when the address is unknown; the update is
 * silently dropped if the allocation fails.  Stamps both the entry
 * and the cache with the current jiffies. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the singly linked list */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
392
393static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
394{
395 struct inquiry_cache *cache = &hdev->inq_cache;
396 struct inquiry_info *info = (struct inquiry_info *) buf;
397 struct inquiry_entry *e;
398 int copied = 0;
399
400 for (e = cache->list; e && copied < num; e = e->next, copied++) {
401 struct inquiry_data *data = &e->data;
402 bacpy(&info->bdaddr, &data->bdaddr);
403 info->pscan_rep_mode = data->pscan_rep_mode;
404 info->pscan_period_mode = data->pscan_period_mode;
405 info->pscan_mode = data->pscan_mode;
406 memcpy(info->dev_class, data->dev_class, 3);
407 info->clock_offset = data->clock_offset;
408 info++;
409 }
410
411 BT_DBG("cache %p, copied %d", cache, copied);
412 return copied;
413}
414
/* Request callback: start an inquiry using the LAP, length and
 * num_rsp from the struct hci_inquiry_req passed via @opt.
 * Does nothing if an inquiry is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
431
/* HCIINQUIRY ioctl handler.
 *
 * Runs a fresh inquiry when the cache is stale, empty, or the caller
 * requested a flush, then copies the (possibly cached) results back
 * to user space: first the updated hci_inquiry_req header, then the
 * inquiry_info array.  Returns 0 on success or a negative errno
 * (-EFAULT, -ENODEV, -ENOMEM, or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the device lock whether a new inquiry is needed */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in units of ~2 seconds */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the header (with the real num_rsp) then the entries */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
497
498/* ---- HCI ioctl helpers ---- */
499
/* HCIDEVUP ioctl helper: power on and initialize HCI device @dev.
 *
 * Fails with -ENODEV (unknown device), -ERFKILL (transmitter
 * blocked), -EALREADY (already up) or -EIO (driver open failed).
 * Unless the device is raw, runs the HCI init sequence (and the LE
 * init sequence on LE-capable hosts).  On success the device holds
 * an extra reference (dropped in hci_dev_do_close()) and HCI_UP is
 * set; on init failure all tasks/queues are torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): a successful LE init here overwrites any
		 * error from the BR/EDR init above in ret — confirm this
		 * is intentional. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Reference dropped again in hci_dev_do_close() */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
586
/* Shut down an HCI device: cancel the pending request, stop RX/TX
 * tasklets, cancel discoverable/power-off work, flush caches and
 * connections, reset the controller (unless raw), drain all queues
 * and finally call the driver's close hook.  Safe to call on a
 * device that is already down.  Always returns 0.
 *
 * The ordering below is deliberate: tasks are killed before queues
 * are purged, and the command queue is purged before the final
 * reset is issued.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down — just make sure the command timer is gone */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
663
664int hci_dev_close(__u16 dev)
665{
666 struct hci_dev *hdev;
667 int err;
668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200669 hdev = hci_dev_get(dev);
670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return -ENODEV;
672 err = hci_dev_do_close(hdev);
673 hci_dev_put(hdev);
674 return err;
675}
676
/* HCIDEVRESET ioctl helper: flush queues, caches and connections and
 * reissue an HCI_Reset without taking the interface down.  The TX
 * tasklet is disabled for the duration so nothing is transmitted
 * mid-reset.  Returns 0 if the device is not up (nothing to do),
 * -ENODEV for an unknown device, or the reset request's result. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters: one free command slot, no
	 * outstanding ACL/SCO/LE packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
717
718int hci_dev_reset_stat(__u16 dev)
719{
720 struct hci_dev *hdev;
721 int ret = 0;
722
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200723 hdev = hci_dev_get(dev);
724 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 return -ENODEV;
726
727 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
728
729 hci_dev_put(hdev);
730
731 return ret;
732}
733
/* Dispatch the HCISET* device-control ioctls.
 *
 * Copies a struct hci_dev_req from user space and either runs the
 * matching HCI request (auth/encrypt/scan/link-policy) or updates
 * the in-kernel device settings directly (link mode, packet type,
 * ACL/SCO MTU).  Returns 0 on success or a negative errno
 * (-EFAULT, -ENODEV, -EOPNOTSUPP, -EINVAL, or a request error).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 halves: second half = MTU,
		 * first half = packet count.
		 * NOTE(review): this pointer-based unpacking looks
		 * endianness-dependent — confirm against the user-space
		 * encoding. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
808
/* HCIGETDEVLIST ioctl helper.
 *
 * Reads the requested entry count from user space, then copies the
 * id and flags of up to that many registered devices back into the
 * caller's struct hci_dev_list_req.  The allocation is capped at two
 * pages worth of entries.  Returns 0 on success, -EFAULT, -EINVAL or
 * -ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as user activity: cancel any
		 * pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed through mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Report how many entries were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
855
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device identified by di.dev_id and copy it back to user space.
 * Returns 0 on success, -EFAULT or -ENODEV on failure. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as user activity: cancel any
	 * pending auto power-off (synchronously here) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed through mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
897
898/* ---- Interface to HCI drivers ---- */
899
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200900static int hci_rfkill_set_block(void *data, bool blocked)
901{
902 struct hci_dev *hdev = data;
903
904 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
905
906 if (!blocked)
907 return 0;
908
909 hci_dev_do_close(hdev);
910
911 return 0;
912}
913
/* rfkill operations: only block transitions are handled (see
 * hci_rfkill_set_block above) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
917
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918/* Alloc HCI device */
919struct hci_dev *hci_alloc_dev(void)
920{
921 struct hci_dev *hdev;
922
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200923 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924 if (!hdev)
925 return NULL;
926
David Herrmann0ac7e702011-10-08 14:58:47 +0200927 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 skb_queue_head_init(&hdev->driver_init);
929
930 return hdev;
931}
932EXPORT_SYMBOL(hci_alloc_dev);
933
/* Free HCI device allocated by hci_alloc_dev(). Drops any frames the
 * driver queued during init; the struct itself is freed by the device
 * core when the last reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
943
/* Deferred power-on work, queued at registration time. Opens the device
 * and, when it was powered on automatically, arms a delayed power-off. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the interface cannot be brought up there is nothing to do */
	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are turned off again after a timeout
	 * unless something (e.g. mgmt) clears HCI_AUTO_OFF first */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on ends the setup phase and announces
	 * the controller to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
960
/* Delayed power-off work, armed by hci_power_on() for auto-powered
 * devices that nobody claimed within the timeout. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The auto-off is happening now, so drop the flag before closing */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
972
/* Delayed work ending a timed discoverable period: fall back to page
 * scan only (no inquiry scan) and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
990
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200991int hci_uuids_clear(struct hci_dev *hdev)
992{
993 struct list_head *p, *n;
994
995 list_for_each_safe(p, n, &hdev->uuids) {
996 struct bt_uuid *uuid;
997
998 uuid = list_entry(p, struct bt_uuid, list);
999
1000 list_del(p);
1001 kfree(uuid);
1002 }
1003
1004 return 0;
1005}
1006
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001007int hci_link_keys_clear(struct hci_dev *hdev)
1008{
1009 struct list_head *p, *n;
1010
1011 list_for_each_safe(p, n, &hdev->link_keys) {
1012 struct link_key *key;
1013
1014 key = list_entry(p, struct link_key, list);
1015
1016 list_del(p);
1017 kfree(key);
1018 }
1019
1020 return 0;
1021}
1022
1023struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1024{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001025 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001026
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001027 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001028 if (bacmp(bdaddr, &k->bdaddr) == 0)
1029 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001030
1031 return NULL;
1032}
1033
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to keep the key, 0 to drop it after notifying userspace.
 * The decision is based on key type and on the bonding requirements that
 * were negotiated for the connection (if any). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1069
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001070struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1071{
1072 struct link_key *k;
1073
1074 list_for_each_entry(k, &hdev->link_keys, list) {
1075 struct key_master_id *id;
1076
1077 if (k->type != HCI_LK_SMP_LTK)
1078 continue;
1079
1080 if (k->dlen != sizeof(*id))
1081 continue;
1082
1083 id = (void *) &k->data;
1084 if (id->ediv == ediv &&
1085 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1086 return k;
1087 }
1088
1089 return NULL;
1090}
1091EXPORT_SYMBOL(hci_find_ltk);
1092
1093struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1094 bdaddr_t *bdaddr, u8 type)
1095{
1096 struct link_key *k;
1097
1098 list_for_each_entry(k, &hdev->link_keys, list)
1099 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1100 return k;
1101
1102 return NULL;
1103}
1104EXPORT_SYMBOL(hci_find_link_key_type);
1105
/* Store (or update) a link key for a remote device and, for new keys,
 * notify the management interface. Non-persistent keys are announced
 * and then immediately dropped from the list. Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type so the
	 * stored entry still reflects how the key was first created */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Userspace has been told about the key; drop it if it should
	 * not be kept across the lifetime of the connection */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1160
/* Store (or update) an SMP Long Term Key for a remote device. The
 * EDIV/Rand identifier is kept in the entry's trailing data area as a
 * struct key_master_id. Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* The negotiated encryption key size is stored in pin_len */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): hci_add_link_key passes a persistent flag as the
	 * third argument of mgmt_new_link_key, while here old_key_type is
	 * passed — looks inconsistent; confirm against mgmt_new_link_key's
	 * signature before changing. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1198
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001199int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1200{
1201 struct link_key *key;
1202
1203 key = hci_find_link_key(hdev, bdaddr);
1204 if (!key)
1205 return -ENOENT;
1206
1207 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1208
1209 list_del(&key->list);
1210 kfree(key);
1211
1212 return 0;
1213}
1214
/* HCI command timer function: fires when a command got no completion in
 * time. Restore the command credit and kick the cmd tasklet so queued
 * commands are not stalled forever. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1224
Szymon Janc2763eda2011-03-22 13:12:22 +01001225struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1226 bdaddr_t *bdaddr)
1227{
1228 struct oob_data *data;
1229
1230 list_for_each_entry(data, &hdev->remote_oob_data, list)
1231 if (bacmp(bdaddr, &data->bdaddr) == 0)
1232 return data;
1233
1234 return NULL;
1235}
1236
1237int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1238{
1239 struct oob_data *data;
1240
1241 data = hci_find_remote_oob_data(hdev, bdaddr);
1242 if (!data)
1243 return -ENOENT;
1244
1245 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1246
1247 list_del(&data->list);
1248 kfree(data);
1249
1250 return 0;
1251}
1252
1253int hci_remote_oob_data_clear(struct hci_dev *hdev)
1254{
1255 struct oob_data *data, *n;
1256
1257 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1258 list_del(&data->list);
1259 kfree(data);
1260 }
1261
1262 return 0;
1263}
1264
1265int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1266 u8 *randomizer)
1267{
1268 struct oob_data *data;
1269
1270 data = hci_find_remote_oob_data(hdev, bdaddr);
1271
1272 if (!data) {
1273 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1274 if (!data)
1275 return -ENOMEM;
1276
1277 bacpy(&data->bdaddr, bdaddr);
1278 list_add(&data->list, &hdev->remote_oob_data);
1279 }
1280
1281 memcpy(data->hash, hash, sizeof(data->hash));
1282 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1283
1284 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1285
1286 return 0;
1287}
1288
Antti Julkub2a66aa2011-06-15 12:01:14 +03001289struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1290 bdaddr_t *bdaddr)
1291{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001292 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001293
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001294 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001295 if (bacmp(bdaddr, &b->bdaddr) == 0)
1296 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001297
1298 return NULL;
1299}
1300
1301int hci_blacklist_clear(struct hci_dev *hdev)
1302{
1303 struct list_head *p, *n;
1304
1305 list_for_each_safe(p, n, &hdev->blacklist) {
1306 struct bdaddr_list *b;
1307
1308 b = list_entry(p, struct bdaddr_list, list);
1309
1310 list_del(p);
1311 kfree(b);
1312 }
1313
1314 return 0;
1315}
1316
1317int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1318{
1319 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001320
1321 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1322 return -EBADF;
1323
Antti Julku5e762442011-08-25 16:48:02 +03001324 if (hci_blacklist_lookup(hdev, bdaddr))
1325 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001326
1327 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001328 if (!entry)
1329 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001330
1331 bacpy(&entry->bdaddr, bdaddr);
1332
1333 list_add(&entry->list, &hdev->blacklist);
1334
Johan Hedberg744cf192011-11-08 20:40:14 +02001335 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001336}
1337
1338int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1339{
1340 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001341
Szymon Janc1ec918c2011-11-16 09:32:21 +01001342 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001343 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001344
1345 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001346 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001347 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001348
1349 list_del(&entry->list);
1350 kfree(entry);
1351
Johan Hedberg744cf192011-11-08 20:40:14 +02001352 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001353}
1354
/* adv_timer callback: flush the cached LE advertising entries once they
 * have gone stale. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1365
Andre Guedes76c86862011-05-26 16:23:50 -03001366int hci_adv_entries_clear(struct hci_dev *hdev)
1367{
1368 struct adv_entry *entry, *tmp;
1369
1370 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1371 list_del(&entry->list);
1372 kfree(entry);
1373 }
1374
1375 BT_DBG("%s adv cache cleared", hdev->name);
1376
1377 return 0;
1378}
1379
1380struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct adv_entry *entry;
1383
1384 list_for_each_entry(entry, &hdev->adv_entries, list)
1385 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1386 return entry;
1387
1388 return NULL;
1389}
1390
1391static inline int is_connectable_adv(u8 evt_type)
1392{
1393 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1394 return 1;
1395
1396 return 0;
1397}
1398
/* Cache a connectable LE advertising report. Non-connectable event
 * types are rejected with -EINVAL; addresses already present in the
 * cache are silently skipped. Returns 0 or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1426
/* Register HCI device. Assigns the first free device id (respecting the
 * AMP constraint below), initializes all per-device state, creates the
 * workqueue and sysfs/rfkill integration, and schedules the initial
 * power-on. Returns the assigned id, or a negative errno on failure. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must provide the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id: the list is kept sorted by id,
	 * so walk it until a gap appears */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Per-device workqueue for power on/off work */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure just leaves the
	 * device without an rfkill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on automatically; hci_power_on() will arm the
	 * auto power-off and finish the setup phase */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1553
/* Unregister HCI device: reverse of hci_register_dev(). Removes the
 * device from the global list, shuts it down, tears down mgmt/rfkill/
 * sysfs integration and frees all cached per-device data. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that completed setup and are
	 * not in the middle of initialization */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached per-device state under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1605
/* Suspend HCI device: just broadcasts the suspend notification to
 * interested listeners. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1613
/* Resume HCI device: just broadcasts the resume notification to
 * interested listeners. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1621
Marcel Holtmann76bca882009-11-18 00:40:39 +01001622/* Receive frame from HCI drivers */
1623int hci_recv_frame(struct sk_buff *skb)
1624{
1625 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1626 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1627 && !test_bit(HCI_INIT, &hdev->flags))) {
1628 kfree_skb(skb);
1629 return -ENXIO;
1630 }
1631
1632 /* Incomming skb */
1633 bt_cb(skb)->incoming = 1;
1634
1635 /* Time stamp */
1636 __net_timestamp(skb);
1637
1638 /* Queue frame for rx task */
1639 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001640 tasklet_schedule(&hdev->rx_task);
1641
Marcel Holtmann76bca882009-11-18 00:40:39 +01001642 return 0;
1643}
1644EXPORT_SYMBOL(hci_recv_frame);
1645
/* Core packet reassembly: append up to @count bytes of @data to the
 * reassembly buffer at @index, allocating the buffer on first use.
 * Complete frames are handed to hci_recv_frame(). Returns the number of
 * input bytes NOT consumed (>= 0), or a negative errno. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Valid packet types are ACL, SCO and EVENT only */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: size the buffer for the worst case
		 * of this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and sanity-check it against the buffer size */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1754
Marcel Holtmannef222012007-07-11 06:42:04 +02001755int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1756{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301757 int rem = 0;
1758
Marcel Holtmannef222012007-07-11 06:42:04 +02001759 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1760 return -EILSEQ;
1761
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001762 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001763 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301764 if (rem < 0)
1765 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001766
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301767 data += (count - rem);
1768 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001769 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001770
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301771 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001772}
1773EXPORT_SYMBOL(hci_recv_fragment);
1774
/* Dedicated reassembly slot for stream transports, where the packet
 * type indicator is carried in-band as the first byte of each frame */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (type indicator byte followed by the packet)
 * into reassembly. Returns leftover byte count or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the type of the frame in
			 * progress */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810/* ---- Interface to upper protocols ---- */
1811
1812/* Register/Unregister protocols.
1813 * hci_task_lock is used to ensure that no tasks are running. */
1814int hci_register_proto(struct hci_proto *hp)
1815{
1816 int err = 0;
1817
1818 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1819
1820 if (hp->id >= HCI_MAX_PROTO)
1821 return -EINVAL;
1822
1823 write_lock_bh(&hci_task_lock);
1824
1825 if (!hci_proto[hp->id])
1826 hci_proto[hp->id] = hp;
1827 else
1828 err = -EEXIST;
1829
1830 write_unlock_bh(&hci_task_lock);
1831
1832 return err;
1833}
1834EXPORT_SYMBOL(hci_register_proto);
1835
1836int hci_unregister_proto(struct hci_proto *hp)
1837{
1838 int err = 0;
1839
1840 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1841
1842 if (hp->id >= HCI_MAX_PROTO)
1843 return -EINVAL;
1844
1845 write_lock_bh(&hci_task_lock);
1846
1847 if (hci_proto[hp->id])
1848 hci_proto[hp->id] = NULL;
1849 else
1850 err = -ENOENT;
1851
1852 write_unlock_bh(&hci_task_lock);
1853
1854 return err;
1855}
1856EXPORT_SYMBOL(hci_unregister_proto);
1857
/* Add a callback set to the global hci_cb_list (protected by
 * hci_cb_list_lock). Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1869
/* Remove a callback set from the global hci_cb_list (protected by
 * hci_cb_list_lock). Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1881
/* Hand one HCI packet to the transport driver.
 * Consumes the skb in every path. Returns -ENODEV when no device is
 * attached to the skb, otherwise whatever the driver's send() returns. */
static int hci_send_frame(struct sk_buff *skb)
{
	/* The owning device is stashed in skb->dev by the queueing code */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Mirror outgoing traffic to raw HCI sockets when someone listens */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1905
/* Send HCI command.
 * Builds a command packet (header + plen bytes of parameters copied from
 * param), queues it on hdev->cmd_q and schedules the command tasklet.
 * Returns 0 on success or -ENOMEM if the skb allocation fails. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	/* GFP_ATOMIC: may be called from non-sleepable context */
	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Opcode is transmitted little-endian per the HCI specification */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command so the init state machine
	 * can match completions against it */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941
1942/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001943void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 struct hci_command_hdr *hdr;
1946
1947 if (!hdev->sent_cmd)
1948 return NULL;
1949
1950 hdr = (void *) hdev->sent_cmd->data;
1951
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001952 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 return NULL;
1954
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001955 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
1957 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1958}
1959
1960/* Send ACL data */
/* Prepend an ACL data header (handle + PB/BC flags, payload length) in
 * front of the skb's current data. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	/* Payload length before the header is pushed */
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and flags share one little-endian 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1972
/* Queue an outgoing ACL frame, including any fragments attached via its
 * frag_list, onto the given channel queue. The head skb was already
 * stamped by the caller (see hci_send_acl()); continuation fragments are
 * stamped here and re-marked ACL_CONT so only the first fragment carries
 * the caller's start flag. Fragments are enqueued under the queue lock so
 * the TX task never sees a partial frame. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2013
/* Send ACL data on a channel: stamp the head skb with device, packet type
 * and ACL header, queue it (plus fragments) via hci_queue_acl() and kick
 * the TX tasklet. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2030
2031/* Send SCO data */
/* Build the SCO header (handle + payload length) in front of the skb,
 * queue it on the connection's data queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the prepared header into the newly pushed headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2053
2054/* ---- HCI TX task (outgoing data) ---- */
2055
2056/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest packets in flight (lowest ->sent), and compute its fair share of
 * the controller's free buffers in *quote. Returns NULL and *quote = 0
 * when no eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	/* min starts at ~0 (all-ones) so any real ->sent value beats it */
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only schedule fully established connections */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a separate LE buffer pool
			 * (le_mtu == 0) share the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Divide buffers evenly; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2110
Ville Tervobae1f5d92011-02-10 22:38:53 -03002111static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112{
2113 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002114 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Ville Tervobae1f5d92011-02-10 22:38:53 -03002116 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002119 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002120 if (c->type == type && c->sent) {
2121 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 hdev->name, batostr(&c->dst));
2123 hci_acl_disconn(c, 0x13);
2124 }
2125 }
2126}
2127
/* Channel-level scheduler: among all channels of the given link type with
 * queued data, select one whose head skb has the highest priority; ties
 * are broken in favour of the connection with the fewest packets in
 * flight. *quote receives that channel's fair share of free controller
 * buffers. Returns NULL when nothing is eligible. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	/* cur_prio tracks the best head-skb priority seen so far; num/min
	 * are reset whenever a strictly higher priority is found */
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		/* Only schedule fully established connections */
		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Peek only: the skb stays queued until the TX path
			 * actually dequeues it */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority class found: restart the
				 * fairness bookkeeping for that class */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness within a priority class uses the
			 * connection's in-flight count */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* Without a dedicated LE pool, LE shares ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide buffers among the winning priority class; at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2205
/* Anti-starvation pass run after a TX round: for every channel of the
 * given type that sent nothing this round (chan->sent == 0) but still has
 * data queued, promote its head skb to HCI_PRIO_MAX - 1 so it wins the
 * next hci_chan_sent() selection. Channels that did send get their
 * per-round counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* This channel got serviced: clear its round counter
			 * and leave its priority untouched */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2252
/* Drain ACL channels: repeatedly let hci_chan_sent() pick the best
 * channel and send up to its quota of packets, stopping a channel early
 * if a lower-priority skb reaches the head of its queue. Afterwards run
 * the starvation pass if anything was actually transmitted. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting buffer count to detect whether we sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually consume the skb we peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2302
2303/* Schedule SCO */
2304static inline void hci_sched_sco(struct hci_dev *hdev)
2305{
2306 struct hci_conn *conn;
2307 struct sk_buff *skb;
2308 int quote;
2309
2310 BT_DBG("%s", hdev->name);
2311
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002312 if (!hci_conn_num(hdev, SCO_LINK))
2313 return;
2314
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2316 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2317 BT_DBG("skb %p len %d", skb, skb->len);
2318 hci_send_frame(skb);
2319
2320 conn->sent++;
2321 if (conn->sent == ~0)
2322 conn->sent = 0;
2323 }
2324 }
2325}
2326
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002327static inline void hci_sched_esco(struct hci_dev *hdev)
2328{
2329 struct hci_conn *conn;
2330 struct sk_buff *skb;
2331 int quote;
2332
2333 BT_DBG("%s", hdev->name);
2334
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002335 if (!hci_conn_num(hdev, ESCO_LINK))
2336 return;
2337
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002338 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2339 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2340 BT_DBG("skb %p len %d", skb, skb->len);
2341 hci_send_frame(skb);
2342
2343 conn->sent++;
2344 if (conn->sent == ~0)
2345 conn->sent = 0;
2346 }
2347 }
2348}
2349
/* Drain LE channels, mirroring hci_sched_acl() but accounting against
 * the LE buffer pool when the controller has one (le_pkts != 0) and
 * against the shared ACL pool otherwise. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the buffer pool: dedicated LE pool, or shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember the starting count to detect whether we sent anything */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Now actually consume the skb we peeked at */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2400
/* TX tasklet: run every per-link-type scheduler, then flush any raw
 * (untyped) packets straight to the driver. hci_task_lock is taken for
 * read so (un)registering protocols excludes running tasks. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2427
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002428/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429
2430/* ACL data packet */
/* Deliver an incoming ACL data packet to the owning connection's upper
 * protocol (L2CAP). Frees the skb when no connection or no protocol
 * handler claims it. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle field packs the connection handle with PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Handler takes ownership of the skb */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2469
2470/* SCO data packet */
/* Deliver an incoming SCO data packet to the owning connection's upper
 * protocol. Frees the skb when no connection or no handler claims it. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Handler takes ownership of the skb */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2505
/* RX tasklet: drain hdev->rx_q, mirror traffic to raw sockets when in
 * promiscuous mode, drop everything in raw mode, suppress data packets
 * during init, and dispatch each frame by packet type. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: user space owns the device, kernel drops */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2560
/* Command tasklet: when the controller has room (cmd_cnt > 0), take the
 * next queued command, keep a clone in hdev->sent_cmd for completion
 * matching, send it, and arm the command timeout timer. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command (NULL-safe) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During HCI_RESET the timeout is disarmed —
			 * presumably because reset handling has its own
			 * recovery path; NOTE(review): confirm intent */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002591
2592int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2593{
2594 /* General inquiry access code (GIAC) */
2595 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2596 struct hci_cp_inquiry cp;
2597
2598 BT_DBG("%s", hdev->name);
2599
2600 if (test_bit(HCI_INQUIRY, &hdev->flags))
2601 return -EINPROGRESS;
2602
2603 memset(&cp, 0, sizeof(cp));
2604 memcpy(&cp.lap, lap, sizeof(cp.lap));
2605 cp.length = length;
2606
2607 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2608}
Andre Guedes023d50492011-11-04 14:16:52 -03002609
/* Abort an ongoing inquiry. Returns -EPERM when none is active,
 * otherwise the result of sending HCI Inquiry Cancel. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002619
/* Module parameter toggling High Speed support; writable via sysfs
 * (mode 0644). The enable_hs variable is defined elsewhere in this
 * file — presumably near the top; confirm against the full source. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");