blob: 086e157ebf44d73572165312734d6ef94ed29375 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an HCI_AUTO_OFF device is powered down again. */
#define AUTO_OFF_TIMEOUT 2000

/* Module parameter-style flag: when unset, non BR/EDR controllers are
 * treated as raw devices (see hci_dev_open()). */
int enable_hs;

/* Tasklet handlers, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Protects the RX/TX/cmd task paths. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80/* ---- HCI notifications ---- */
81
/* Register a notifier block for HCI device events (up/down/register/...). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
86
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
91
/* Broadcast an HCI device event to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
96
97/* ---- HCI requests ---- */
98
/* Called from the event path when a command completes; wakes up a
 * waiter blocked in __hci_request() if one is pending. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115
/* Abort a pending synchronous request with the given (positive) errno;
 * __hci_request() will see HCI_REQ_CANCELED and return -err. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
126
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and then sleeps
 * interruptibly until hci_req_complete()/hci_req_cancel() fires or
 * @timeout (jiffies) expires.  Caller must hold the request lock
 * (see hci_request()).  Returns 0 or a negative errno.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Must be on the wait queue before issuing the request so the
	 * completion wakeup cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
/* Locked wrapper around __hci_request(): refuses when the device is not
 * up and serializes all synchronous requests per device. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
185
/* Request callback: issue an HCI reset; HCI_RESET is set so the event
 * path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194
/* Request callback run during hci_dev_open(): queues the full controller
 * initialization command sequence (driver-specific commands first, then
 * reset, capability reads and basic configuration). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-provided init commands into
	 * the command queue ahead of the generic sequence. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys on the controller side. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
271
/* Request callback: LE-specific init, run after hci_init_req() when the
 * host is LE capable. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
281{
282 __u8 scan = opt;
283
284 BT_DBG("%s %x", hdev->name, scan);
285
286 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200287 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288}
289
290static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
291{
292 __u8 auth = opt;
293
294 BT_DBG("%s %x", hdev->name, auth);
295
296 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200297 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298}
299
300static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
301{
302 __u8 encrypt = opt;
303
304 BT_DBG("%s %x", hdev->name, encrypt);
305
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200306 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200307 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308}
309
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200310static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __le16 policy = cpu_to_le16(opt);
313
Marcel Holtmanna418b892008-11-30 12:17:28 +0100314 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315
316 /* Default link policy */
317 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
318}
319
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take a reference while still under the list lock. */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341
/* ---- Inquiry support ---- */

/* Free every entry of the device's inquiry cache and leave it empty.
 * Callers hold the device lock (see hci_inquiry()/hci_dev_do_close()). */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		/* Save the successor before freeing the node. */
		next = e->next;
		kfree(e);
	}
}
356
357struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
358{
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *e;
361
362 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
363
364 for (e = cache->list; e; e = e->next)
365 if (!bacmp(&e->data.bdaddr, bdaddr))
366 break;
367 return e;
368}
369
/* Insert or refresh an inquiry result in the cache: existing entries are
 * overwritten in place, new addresses are prepended to the list. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
392
/* Serialize up to @num cached inquiry entries into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.  Caller
 * must size @buf for @num entries and hold the device lock. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
414
/* Request callback: start an inquiry using the parameters passed as a
 * struct hci_inquiry_req through @opt.  No-op if an inquiry is already
 * in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
431
/* HCIINQUIRY ioctl backend: optionally (re)run an inquiry, then copy the
 * request header and cached results back to user space.  Returns 0 or a
 * negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A fresh inquiry is needed when the cache is stale or empty, or
	 * when the caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s-style units; ~2s of jiffies per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) followed by the
	 * result array. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
497
/* ---- HCI ioctl helpers ---- */

/* Bring up HCI device @dev: open the transport, run the init command
 * sequence (unless the device is raw) and announce HCI_DEV_UP.  On init
 * failure the transport is fully torn down again.  Returns 0 or a
 * negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): an LE init failure here overwrites the
		 * BR/EDR init result in ret. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock_bh(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock_bh(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
586
/* Bring the device fully down: cancel pending work, kill tasklets, flush
 * caches and queues, reset the controller (unless raw) and close the
 * transport.  Safe to call on a device that is already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock_bh(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock_bh(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
663
664int hci_dev_close(__u16 dev)
665{
666 struct hci_dev *hdev;
667 int err;
668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200669 hdev = hci_dev_get(dev);
670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return -ENODEV;
672 err = hci_dev_do_close(hdev);
673 hci_dev_put(hdev);
674 return err;
675}
676
/* HCIDEVRESET ioctl backend: flush queues, caches and connections, then
 * reset the controller (unless raw).  TX is disabled for the duration.
 * Succeeds trivially when the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command flow and clear per-link-type packet credits. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
717
718int hci_dev_reset_stat(__u16 dev)
719{
720 struct hci_dev *hdev;
721 int ret = 0;
722
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200723 hdev = hci_dev_get(dev);
724 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700725 return -ENODEV;
726
727 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
728
729 hci_dev_put(hdev);
730
731 return ret;
732}
733
/* Dispatcher for the HCISET* device-configuration ioctls.  Copies a
 * struct hci_dev_req from user space and applies the setting, either by
 * issuing an HCI command (auth/encrypt/scan/linkpol) or by updating the
 * in-kernel device fields directly.  Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two 16-bit values: high half = MTU, low half =
	 * packet count. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
808
/* HCIGETDEVLIST ioctl backend: copy (id, flags) pairs for up to the
 * caller-supplied number of registered devices back to user space.
 * Also cancels pending auto-power-off and marks non-mgmt devices
 * pairable as a side effect of the enumeration. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to two pages worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
855
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for the
 * requested device and copy it back to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
897
898/* ---- Interface to HCI drivers ---- */
899
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200900static int hci_rfkill_set_block(void *data, bool blocked)
901{
902 struct hci_dev *hdev = data;
903
904 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
905
906 if (!blocked)
907 return 0;
908
909 hci_dev_do_close(hdev);
910
911 return 0;
912}
913
/* Operations registered with the rfkill core; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
917
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918/* Alloc HCI device */
919struct hci_dev *hci_alloc_dev(void)
920{
921 struct hci_dev *hdev;
922
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200923 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700924 if (!hdev)
925 return NULL;
926
David Herrmann0ac7e702011-10-08 14:58:47 +0200927 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 skb_queue_head_init(&hdev->driver_init);
929
930 return hdev;
931}
932EXPORT_SYMBOL(hci_alloc_dev);
933
/* Free HCI device.
 *
 * Drops the embedded device reference; the actual struct hci_dev memory
 * is freed by the device core's release callback, not here.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
943
/* Deferred power-on work: bring the device up, arm the auto-off timer if
 * still in auto-off mode, and announce the controller to mgmt once setup
 * completes.  Runs on hdev->workqueue.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If nothing claims the device, power it back off after the timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful bring-up: tell mgmt the index now exists */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
960
/* Deferred power-off work: clears auto-off mode and closes the device.
 * Scheduled from hci_power_on() via hdev->power_off.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
972
/* Delayed work that ends a time-limited discoverable period: switch scan
 * mode back to page-scan only and reset the recorded timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;	/* page scan on, inquiry scan off */

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
990
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200991int hci_uuids_clear(struct hci_dev *hdev)
992{
993 struct list_head *p, *n;
994
995 list_for_each_safe(p, n, &hdev->uuids) {
996 struct bt_uuid *uuid;
997
998 uuid = list_entry(p, struct bt_uuid, list);
999
1000 list_del(p);
1001 kfree(uuid);
1002 }
1003
1004 return 0;
1005}
1006
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001007int hci_link_keys_clear(struct hci_dev *hdev)
1008{
1009 struct list_head *p, *n;
1010
1011 list_for_each_safe(p, n, &hdev->link_keys) {
1012 struct link_key *key;
1013
1014 key = list_entry(p, struct link_key, list);
1015
1016 list_del(p);
1017 kfree(key);
1018 }
1019
1020 return 0;
1021}
1022
1023struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1024{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001025 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001026
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001027 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001028 if (bacmp(bdaddr, &k->bdaddr) == 0)
1029 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001030
1031 return NULL;
1032}
1033
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to keep the key across the connection, 0 to discard it once
 * the link drops.  The checks are ordered from strongest to weakest
 * evidence about the pairing's intent.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1069
/* Look up a stored SMP Long Term Key by its EDIV/Rand pair.
 * LTKs live on the same list as classic link keys, distinguished by
 * type HCI_LK_SMP_LTK with a struct key_master_id in the data payload.
 * Returns the matching entry, or NULL.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Skip malformed entries whose payload isn't a master id */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1092
1093struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1094 bdaddr_t *bdaddr, u8 type)
1095{
1096 struct link_key *k;
1097
1098 list_for_each_entry(k, &hdev->link_keys, list)
1099 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1100 return k;
1101
1102 return NULL;
1103}
1104EXPORT_SYMBOL(hci_find_link_key_type);
1105
/* Store (or update) a link key for the given remote address and notify
 * mgmt when it is a newly created key.  Non-persistent keys are reported
 * but immediately dropped from the list again.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, not stored */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1160
/* Store (or update) an SMP Long Term Key for the given remote address.
 * The EDIV/Rand master id is carried in the key's variable-length data.
 * Notifies mgmt when new_key is set.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate room for the trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* pin_len doubles as LTK key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1198
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001199int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1200{
1201 struct link_key *key;
1202
1203 key = hci_find_link_key(hdev, bdaddr);
1204 if (!key)
1205 return -ENOENT;
1206
1207 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1208
1209 list_del(&key->list);
1210 kfree(key);
1211
1212 return 0;
1213}
1214
/* HCI command timer function.
 * Fires when the controller failed to answer a command in time; restores
 * the command credit so the command tasklet can make progress again.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1224
Szymon Janc2763eda2011-03-22 13:12:22 +01001225struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1226 bdaddr_t *bdaddr)
1227{
1228 struct oob_data *data;
1229
1230 list_for_each_entry(data, &hdev->remote_oob_data, list)
1231 if (bacmp(bdaddr, &data->bdaddr) == 0)
1232 return data;
1233
1234 return NULL;
1235}
1236
1237int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1238{
1239 struct oob_data *data;
1240
1241 data = hci_find_remote_oob_data(hdev, bdaddr);
1242 if (!data)
1243 return -ENOENT;
1244
1245 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1246
1247 list_del(&data->list);
1248 kfree(data);
1249
1250 return 0;
1251}
1252
1253int hci_remote_oob_data_clear(struct hci_dev *hdev)
1254{
1255 struct oob_data *data, *n;
1256
1257 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1258 list_del(&data->list);
1259 kfree(data);
1260 }
1261
1262 return 0;
1263}
1264
/* Store (or refresh) the Secure Simple Pairing OOB hash/randomizer pair
 * for a remote address.  Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1288
Antti Julkub2a66aa2011-06-15 12:01:14 +03001289struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1290 bdaddr_t *bdaddr)
1291{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001292 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001293
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001294 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001295 if (bacmp(bdaddr, &b->bdaddr) == 0)
1296 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001297
1298 return NULL;
1299}
1300
1301int hci_blacklist_clear(struct hci_dev *hdev)
1302{
1303 struct list_head *p, *n;
1304
1305 list_for_each_safe(p, n, &hdev->blacklist) {
1306 struct bdaddr_list *b;
1307
1308 b = list_entry(p, struct bdaddr_list, list);
1309
1310 list_del(p);
1311 kfree(b);
1312 }
1313
1314 return 0;
1315}
1316
/* Add a remote address to the blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on
 * allocation failure, otherwise the mgmt notification result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1337
/* Remove a remote address from the blacklist and notify mgmt.
 * BDADDR_ANY means "clear the whole blacklist".  Returns -ENOENT if the
 * address is not blacklisted, otherwise the mgmt notification result.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1354
/* Timer callback (hdev->adv_timer): flush the cached LE advertising
 * entries under the device lock.
 */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1365
Andre Guedes76c86862011-05-26 16:23:50 -03001366int hci_adv_entries_clear(struct hci_dev *hdev)
1367{
1368 struct adv_entry *entry, *tmp;
1369
1370 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1371 list_del(&entry->list);
1372 kfree(entry);
1373 }
1374
1375 BT_DBG("%s adv cache cleared", hdev->name);
1376
1377 return 0;
1378}
1379
1380struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1381{
1382 struct adv_entry *entry;
1383
1384 list_for_each_entry(entry, &hdev->adv_entries, list)
1385 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1386 return entry;
1387
1388 return NULL;
1389}
1390
1391static inline int is_connectable_adv(u8 evt_type)
1392{
1393 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1394 return 1;
1395
1396 return 0;
1397}
1398
/* Cache a connectable LE advertising report's address.
 * Returns -EINVAL for non-connectable event types, 0 when the address is
 * already cached or was added, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1426
/* Register HCI device.
 *
 * Assigns the lowest free index (AMP controllers start at 1 so index 0
 * stays usable as the AMP controller ID), initializes all per-device
 * state, creates the workqueue and sysfs/rfkill hooks, and kicks off the
 * asynchronous power-on.  Returns the assigned id, or a negative errno;
 * on failure the device is removed from the global list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply the basic transport callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted so the
	 * first gap is the lowest free index */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/TX/command processing runs in tasklet context */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Per-device ordered workqueue for power management work */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Start in setup/auto-off mode and power up asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1552
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * shuts it down, notifies mgmt (unless the device never finished setup),
 * tears down rfkill/sysfs/workqueue, frees all cached state and drops the
 * final reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices mgmt ever saw as present */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock_bh(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the reference taken in hci_register_dev() */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1604
/* Suspend HCI device: just forwards the event to registered notifiers */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1612
/* Resume HCI device: just forwards the event to registered notifiers */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1620
/* Receive frame from HCI drivers.
 * Accepts a frame only while the device is up or initializing, timestamps
 * it and queues it for the RX tasklet.  Takes ownership of the skb in all
 * cases.  Returns 0, or -ENXIO when the device cannot accept frames.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1644
/* Reassemble a possibly fragmented HCI packet of the given type from a
 * driver-supplied byte stream.
 *
 * Partial packets are parked in hdev->reassembly[index] between calls.
 * Once the header arrives, the expected payload length is read from it;
 * a complete packet is handed to hci_recv_frame().  Returns the number
 * of unconsumed bytes (>= 0), or a negative errno (-EILSEQ on a bad
 * type/index, -ENOMEM on allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst case
		 * of this packet type and expect the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than what the current header/payload needs */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1753
/* Feed a fragment of a typed HCI packet into the reassembler, looping
 * until all bytes are consumed.  One reassembly slot per packet type
 * (index = type - 1).  Returns the final unconsumed count (normally 0)
 * or a negative errno from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1773
/* Dedicated reassembly slot for untyped byte streams */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes (e.g. from a UART transport) into the
 * reassembler.  The first byte of each packet carries its HCI packet
 * type; subsequent fragments reuse the type remembered in the parked
 * skb.  Returns the final unconsumed count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1808
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809/* ---- Interface to upper protocols ---- */
1810
1811/* Register/Unregister protocols.
1812 * hci_task_lock is used to ensure that no tasks are running. */
1813int hci_register_proto(struct hci_proto *hp)
1814{
1815 int err = 0;
1816
1817 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1818
1819 if (hp->id >= HCI_MAX_PROTO)
1820 return -EINVAL;
1821
1822 write_lock_bh(&hci_task_lock);
1823
1824 if (!hci_proto[hp->id])
1825 hci_proto[hp->id] = hp;
1826 else
1827 err = -EEXIST;
1828
1829 write_unlock_bh(&hci_task_lock);
1830
1831 return err;
1832}
1833EXPORT_SYMBOL(hci_register_proto);
1834
1835int hci_unregister_proto(struct hci_proto *hp)
1836{
1837 int err = 0;
1838
1839 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1840
1841 if (hp->id >= HCI_MAX_PROTO)
1842 return -EINVAL;
1843
1844 write_lock_bh(&hci_task_lock);
1845
1846 if (hci_proto[hp->id])
1847 hci_proto[hp->id] = NULL;
1848 else
1849 err = -ENOENT;
1850
1851 write_unlock_bh(&hci_task_lock);
1852
1853 return err;
1854}
1855EXPORT_SYMBOL(hci_unregister_proto);
1856
/* Add a connection-event callback to the global hci_cb_list.
 * Callers get notified of link-level events (auth/encrypt changes etc.).
 * Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1868
/* Remove a connection-event callback from the global hci_cb_list.
 * Always succeeds; the entry must have been registered before. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1880
/* Hand a fully built HCI packet to the transport driver.
 * The owning hci_dev is carried in skb->dev; if it is missing the skb
 * is dropped.  When a promiscuous (raw) listener exists, a timestamped
 * copy is delivered to the HCI sockets first.  Consumes the skb either
 * way.  Returns the driver's send() result or -ENODEV. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1904
1905/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001906int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907{
1908 int len = HCI_COMMAND_HDR_SIZE + plen;
1909 struct hci_command_hdr *hdr;
1910 struct sk_buff *skb;
1911
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001912 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914 skb = bt_skb_alloc(len, GFP_ATOMIC);
1915 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001916 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 return -ENOMEM;
1918 }
1919
1920 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001921 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 hdr->plen = plen;
1923
1924 if (plen)
1925 memcpy(skb_put(skb, plen), param, plen);
1926
1927 BT_DBG("skb len %d", skb->len);
1928
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001929 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001931
Johan Hedberga5040ef2011-01-10 13:28:59 +02001932 if (test_bit(HCI_INIT, &hdev->flags))
1933 hdev->init_last_cmd = opcode;
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001936 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 return 0;
1939}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
1941/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001942void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943{
1944 struct hci_command_hdr *hdr;
1945
1946 if (!hdev->sent_cmd)
1947 return NULL;
1948
1949 hdr = (void *) hdev->sent_cmd->data;
1950
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001951 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 return NULL;
1953
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001954 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
1956 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1957}
1958
1959/* Send ACL data */
/* Send ACL data */
/* Prepend an ACL header to @skb: 12-bit connection handle packed with
 * the PB/BC flag bits, followed by the payload length, both
 * little-endian. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1971
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001972static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1973 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974{
1975 struct hci_dev *hdev = conn->hdev;
1976 struct sk_buff *list;
1977
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001978 list = skb_shinfo(skb)->frag_list;
1979 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 /* Non fragmented */
1981 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1982
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001983 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 } else {
1985 /* Fragmented */
1986 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1987
1988 skb_shinfo(skb)->frag_list = NULL;
1989
1990 /* Queue all fragments atomically */
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001991 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001993 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001994
1995 flags &= ~ACL_START;
1996 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 do {
1998 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001999
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002001 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002002 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003
2004 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2005
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002006 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 } while (list);
2008
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002009 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002011}
2012
/* Transmit ACL data on a channel: stamp the skb with its device and
 * packet type, prepend the ACL header, queue it (handling fragments)
 * and kick the TX tasklet to schedule transmission. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2029
2030/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002031void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032{
2033 struct hci_dev *hdev = conn->hdev;
2034 struct hci_sco_hdr hdr;
2035
2036 BT_DBG("%s len %d", hdev->name, skb->len);
2037
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002038 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 hdr.dlen = skb->len;
2040
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002041 skb_push(skb, HCI_SCO_HDR_SIZE);
2042 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002043 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002046 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002047
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002049 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050}
2051EXPORT_SYMBOL(hci_send_sco);
2052
2053/* ---- HCI TX task (outgoing data) ---- */
2054
2055/* HCI Connection scheduler */
/* HCI Connection scheduler */
/* Pick the connection of @type that should transmit next: among ready
 * connections with queued data, choose the one with the fewest
 * in-flight packets (fair round-robin).  *quote is set to the number
 * of packets that connection may send this round (its fair share of
 * the controller's free buffers, minimum 1), or 0 when nothing is
 * ready. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the least outstanding data */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2109
Ville Tervobae1f5d92011-02-10 22:38:53 -03002110static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111{
2112 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002113 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Ville Tervobae1f5d92011-02-10 22:38:53 -03002115 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116
2117 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002118 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002119 if (c->type == type && c->sent) {
2120 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 hdev->name, batostr(&c->dst));
2122 hci_acl_disconn(c, 0x13);
2123 }
2124 }
2125}
2126
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002127static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2128 int *quote)
2129{
2130 struct hci_conn_hash *h = &hdev->conn_hash;
2131 struct hci_chan *chan = NULL;
2132 int num = 0, min = ~0, cur_prio = 0;
2133 struct hci_conn *conn;
2134 int cnt, q, conn_num = 0;
2135
2136 BT_DBG("%s", hdev->name);
2137
2138 list_for_each_entry(conn, &h->list, list) {
2139 struct hci_chan_hash *ch;
2140 struct hci_chan *tmp;
2141
2142 if (conn->type != type)
2143 continue;
2144
2145 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2146 continue;
2147
2148 conn_num++;
2149
2150 ch = &conn->chan_hash;
2151
2152 list_for_each_entry(tmp, &ch->list, list) {
2153 struct sk_buff *skb;
2154
2155 if (skb_queue_empty(&tmp->data_q))
2156 continue;
2157
2158 skb = skb_peek(&tmp->data_q);
2159 if (skb->priority < cur_prio)
2160 continue;
2161
2162 if (skb->priority > cur_prio) {
2163 num = 0;
2164 min = ~0;
2165 cur_prio = skb->priority;
2166 }
2167
2168 num++;
2169
2170 if (conn->sent < min) {
2171 min = conn->sent;
2172 chan = tmp;
2173 }
2174 }
2175
2176 if (hci_conn_num(hdev, type) == conn_num)
2177 break;
2178 }
2179
2180 if (!chan)
2181 return NULL;
2182
2183 switch (chan->conn->type) {
2184 case ACL_LINK:
2185 cnt = hdev->acl_cnt;
2186 break;
2187 case SCO_LINK:
2188 case ESCO_LINK:
2189 cnt = hdev->sco_cnt;
2190 break;
2191 case LE_LINK:
2192 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2193 break;
2194 default:
2195 cnt = 0;
2196 BT_ERR("Unknown link type");
2197 }
2198
2199 q = cnt / num;
2200 *quote = q ? q : 1;
2201 BT_DBG("chan %p quote %d", chan, *quote);
2202 return chan;
2203}
2204
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002205static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2206{
2207 struct hci_conn_hash *h = &hdev->conn_hash;
2208 struct hci_conn *conn;
2209 int num = 0;
2210
2211 BT_DBG("%s", hdev->name);
2212
2213 list_for_each_entry(conn, &h->list, list) {
2214 struct hci_chan_hash *ch;
2215 struct hci_chan *chan;
2216
2217 if (conn->type != type)
2218 continue;
2219
2220 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221 continue;
2222
2223 num++;
2224
2225 ch = &conn->chan_hash;
2226 list_for_each_entry(chan, &ch->list, list) {
2227 struct sk_buff *skb;
2228
2229 if (chan->sent) {
2230 chan->sent = 0;
2231 continue;
2232 }
2233
2234 if (skb_queue_empty(&chan->data_q))
2235 continue;
2236
2237 skb = skb_peek(&chan->data_q);
2238 if (skb->priority >= HCI_PRIO_MAX - 1)
2239 continue;
2240
2241 skb->priority = HCI_PRIO_MAX - 1;
2242
2243 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2244 skb->priority);
2245 }
2246
2247 if (hci_conn_num(hdev, type) == num)
2248 break;
2249 }
2250}
2251
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252static inline void hci_sched_acl(struct hci_dev *hdev)
2253{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002254 struct hci_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 struct sk_buff *skb;
2256 int quote;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002257 unsigned int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
2259 BT_DBG("%s", hdev->name);
2260
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002261 if (!hci_conn_num(hdev, ACL_LINK))
2262 return;
2263
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 if (!test_bit(HCI_RAW, &hdev->flags)) {
2265 /* ACL tx timeout must be longer than maximum
2266 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur824530212008-02-17 23:25:57 -08002267 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002268 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269 }
2270
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002271 cnt = hdev->acl_cnt;
Marcel Holtmann04837f62006-07-03 10:02:33 +02002272
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002273 while (hdev->acl_cnt &&
2274 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002275 u32 priority = (skb_peek(&chan->data_q))->priority;
2276 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002277 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278 skb->len, skb->priority);
2279
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002280 /* Stop if priority has changed */
2281 if (skb->priority < priority)
2282 break;
2283
2284 skb = skb_dequeue(&chan->data_q);
2285
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002286 hci_conn_enter_active_mode(chan->conn,
2287 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002288
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 hci_send_frame(skb);
2290 hdev->acl_last_tx = jiffies;
2291
2292 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002293 chan->sent++;
2294 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 }
2296 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002297
2298 if (cnt != hdev->acl_cnt)
2299 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300}
2301
2302/* Schedule SCO */
/* Schedule SCO */
/* SCO TX scheduler: round-robin over SCO connections via
 * hci_low_sent(), sending each its quota.  conn->sent wraps to 0 at ~0
 * because SCO has no completed-packets feedback to decrement it. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2325
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002326static inline void hci_sched_esco(struct hci_dev *hdev)
2327{
2328 struct hci_conn *conn;
2329 struct sk_buff *skb;
2330 int quote;
2331
2332 BT_DBG("%s", hdev->name);
2333
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002334 if (!hci_conn_num(hdev, ESCO_LINK))
2335 return;
2336
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002337 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339 BT_DBG("skb %p len %d", skb, skb->len);
2340 hci_send_frame(skb);
2341
2342 conn->sent++;
2343 if (conn->sent == ~0)
2344 conn->sent = 0;
2345 }
2346 }
2347}
2348
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002349static inline void hci_sched_le(struct hci_dev *hdev)
2350{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002351 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002352 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002353 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002354
2355 BT_DBG("%s", hdev->name);
2356
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002357 if (!hci_conn_num(hdev, LE_LINK))
2358 return;
2359
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002360 if (!test_bit(HCI_RAW, &hdev->flags)) {
2361 /* LE tx timeout must be longer than maximum
2362 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002363 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002364 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002365 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002366 }
2367
2368 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002369 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002370 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002371 u32 priority = (skb_peek(&chan->data_q))->priority;
2372 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002373 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2374 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002375
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002376 /* Stop if priority has changed */
2377 if (skb->priority < priority)
2378 break;
2379
2380 skb = skb_dequeue(&chan->data_q);
2381
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002382 hci_send_frame(skb);
2383 hdev->le_last_tx = jiffies;
2384
2385 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002386 chan->sent++;
2387 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002388 }
2389 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002390
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002391 if (hdev->le_pkts)
2392 hdev->le_cnt = cnt;
2393 else
2394 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002395
2396 if (cnt != tmp)
2397 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002398}
2399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400static void hci_tx_task(unsigned long arg)
2401{
2402 struct hci_dev *hdev = (struct hci_dev *) arg;
2403 struct sk_buff *skb;
2404
2405 read_lock(&hci_task_lock);
2406
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002407 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2408 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
2410 /* Schedule queues and send stuff to HCI driver */
2411
2412 hci_sched_acl(hdev);
2413
2414 hci_sched_sco(hdev);
2415
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002416 hci_sched_esco(hdev);
2417
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002418 hci_sched_le(hdev);
2419
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 /* Send next queued raw (unknown type) packet */
2421 while ((skb = skb_dequeue(&hdev->raw_q)))
2422 hci_send_frame(skb);
2423
2424 read_unlock(&hci_task_lock);
2425}
2426
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002427/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428
/* ACL data packet */
/* Deliver an inbound ACL packet: decode the handle/flag word, look up
 * the connection, wake it from sniff mode if needed and pass the
 * payload to the L2CAP layer.  The skb is consumed here only when no
 * connection or no protocol handler exists; otherwise ownership moves
 * to recv_acldata(). */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* Handle word packs the 12-bit handle with PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2468
2469/* SCO data packet */
/* SCO data packet */
/* Deliver an inbound SCO packet to the SCO protocol layer, looking the
 * connection up by handle.  The skb is consumed here only when no
 * connection or no handler exists; otherwise ownership moves to
 * recv_scodata(). */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2504
Marcel Holtmann65164552005-10-28 19:20:48 +02002505static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506{
2507 struct hci_dev *hdev = (struct hci_dev *) arg;
2508 struct sk_buff *skb;
2509
2510 BT_DBG("%s", hdev->name);
2511
2512 read_lock(&hci_task_lock);
2513
2514 while ((skb = skb_dequeue(&hdev->rx_q))) {
2515 if (atomic_read(&hdev->promisc)) {
2516 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002517 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 }
2519
2520 if (test_bit(HCI_RAW, &hdev->flags)) {
2521 kfree_skb(skb);
2522 continue;
2523 }
2524
2525 if (test_bit(HCI_INIT, &hdev->flags)) {
2526 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002527 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528 case HCI_ACLDATA_PKT:
2529 case HCI_SCODATA_PKT:
2530 kfree_skb(skb);
2531 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002532 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 }
2534
2535 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002536 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 case HCI_EVENT_PKT:
2538 hci_event_packet(hdev, skb);
2539 break;
2540
2541 case HCI_ACLDATA_PKT:
2542 BT_DBG("%s ACL data packet", hdev->name);
2543 hci_acldata_packet(hdev, skb);
2544 break;
2545
2546 case HCI_SCODATA_PKT:
2547 BT_DBG("%s SCO data packet", hdev->name);
2548 hci_scodata_packet(hdev, skb);
2549 break;
2550
2551 default:
2552 kfree_skb(skb);
2553 break;
2554 }
2555 }
2556
2557 read_unlock(&hci_task_lock);
2558}
2559
/* Command tasklet body: when the controller has a free command credit,
 * dequeue the next command, keep a clone in hdev->sent_cmd (for
 * hci_sent_cmd_data()) and transmit it.  A response timer is armed
 * unless a reset is pending; if the clone cannot be allocated the
 * command is requeued and the tasklet rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command (kfree_skb(NULL) is ok) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002590
2591int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2592{
2593 /* General inquiry access code (GIAC) */
2594 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2595 struct hci_cp_inquiry cp;
2596
2597 BT_DBG("%s", hdev->name);
2598
2599 if (test_bit(HCI_INQUIRY, &hdev->flags))
2600 return -EINPROGRESS;
2601
2602 memset(&cp, 0, sizeof(cp));
2603 memcpy(&cp.lap, lap, sizeof(cp.lap));
2604 cp.length = length;
2605
2606 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2607}
Andre Guedes023d50492011-11-04 14:16:52 -03002608
/* Abort a running inquiry.  Returns -EPERM when no inquiry is active,
 * otherwise the result of sending HCI Inquiry Cancel. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002618
/* Runtime-tunable switch (/sys/module parameter, mode 0644) for
 * Bluetooth High Speed support; enable_hs itself is defined earlier
 * in this file. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");