blob: e6e991331ef82f63119c41a75834fb11460b7e1a [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an auto-powered-on controller is powered back off
 * if userspace has not claimed it. */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet bodies defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Serializes the rx/tx/cmd tasklets against proto (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (L2CAP and SCO slots) */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/* ---- HCI notifications ---- */
79
/* Register @nb to receive HCI_DEV_* events (see hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
84
/* Remove @nb from the HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
89
/* Broadcast @event (HCI_DEV_UP, HCI_DEV_DOWN, ...) for @hdev to all
 * registered notifiers. Atomic chain: safe from non-process context. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
94
95/* ---- HCI requests ---- */
96
/* Called from event processing when command @cmd finished with @result.
 * Completes a pending synchronous request (see __hci_request()) by
 * storing the result and waking the waiter. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
/* Execute request and wait for completion. */
/* Runs @req (which queues HCI commands) and sleeps up to @timeout jiffies
 * until hci_req_complete()/hci_req_cancel() fires. Caller must hold the
 * request lock (hci_req_lock). Returns 0 or a negative errno.
 * NOTE: wait-queue entry is added and state set *before* issuing the
 * request so a completion racing with us cannot be lost. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result is an HCI status byte; map it to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
/* Public wrapper around __hci_request(): refuses when the device is down
 * and takes the request lock so synchronous requests are serialized. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
183
/* Request callback: issue HCI_Reset. HCI_RESET flag is consumed by the
 * command-complete path to know a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Request callback run with HCI_INIT set: queues the controller
 * bring-up command sequence. Only the completion of the *last* queued
 * command finishes the request (see hci_req_complete()). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: driver-supplied vendor setup packets go first */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that can't survive it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Flush any link keys persisted in the controller itself */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* Request callback: LE-specific bring-up, run after hci_init_req()
 * when the controller/host is LE capable. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
277
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200304 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306}
307
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
Marcel Holtmanna418b892008-11-30 12:17:28 +0100312 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
/* Get HCI device by index.
 * Device is held on return. */
/* Looks up the device with id == @index under the dev-list read lock and
 * takes a reference (hci_dev_hold) before returning. Caller must release
 * it with hci_dev_put(). Returns NULL when not found or @index < 0. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Grab the reference while still under the lock */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
/* Insert or refresh the cache entry for @data->bdaddr. Called from
 * inquiry-result event handling; GFP_ATOMIC because of that context.
 * Allocation failure silently drops the result (best-effort cache). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the list */
		ie->next = cache->list;
		cache->list = ie;
	}

	/* New or existing: overwrite the data and refresh timestamps */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
390
/* Serialize up to @num cached entries into @buf as an array of
 * struct inquiry_info (the HCIINQUIRY ioctl wire format).
 * Returns the number of entries copied. Must not sleep — the caller
 * holds the device lock and copies to userspace afterwards. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
412
/* Request callback for hci_inquiry(): @opt points at the user-supplied
 * struct hci_inquiry_req. Skips issuing the command when an inquiry is
 * already in progress (HCI_INQUIRY flag). */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
429
/* HCIINQUIRY ioctl backend: run (or reuse a fresh cache of) an inquiry
 * and copy the results back to userspace after the updated request
 * header. Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a new inquiry when the cache is stale, empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units; ~2s of jiffies per unit here */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp), then the entries */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
495
/* ---- HCI ioctl helpers ---- */

/* Bring device @dev up: open the driver, run the HCI (and, when LE
 * capable, LE) init sequence, then mark HCI_UP and notify listeners.
 * On init failure everything opened so far is torn down again.
 * Returns 0 or a negative errno (-ERFKILL, -EALREADY, -EIO, ...). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a radio the user has soft-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Already under hci_req_lock, so use __hci_request directly */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Hold a reference for as long as the device stays up;
		 * dropped in hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Take @hdev down: cancel pending requests, stop tasklets, flush
 * connections/queues, reset the controller (unless raw) and close the
 * driver. Teardown order matters — tasklets are killed before queues are
 * purged, and the cmd timer before freeing sent_cmd. Always returns 0.
 * Drops the reference taken at hci_dev_open() time. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just stop the command timeout timer */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
647
648int hci_dev_close(__u16 dev)
649{
650 struct hci_dev *hdev;
651 int err;
652
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200653 hdev = hci_dev_get(dev);
654 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655 return -ENODEV;
656 err = hci_dev_do_close(hdev);
657 hci_dev_put(hdev);
658 return err;
659}
660
/* HCIDEVRESET ioctl backend: flush queues, inquiry cache and
 * connections, then issue an HCI_Reset (unless the device is raw).
 * The TX tasklet is disabled for the duration so nothing transmits
 * through the half-reset state. No-op (returns 0) when device is down. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the flow-control counters to their post-reset state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
701
702int hci_dev_reset_stat(__u16 dev)
703{
704 struct hci_dev *hdev;
705 int ret = 0;
706
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200707 hdev = hci_dev_get(dev);
708 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 return -ENODEV;
710
711 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
712
713 hci_dev_put(hdev);
714
715 return ret;
716}
717
/* Dispatcher for the HCISET* device-configuration ioctls. @arg points
 * to a struct hci_dev_req {dev_id, dev_opt}. Settings that need the
 * controller's cooperation go through hci_request(); the rest just
 * update the in-kernel hci_dev fields. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16s: pkts in the low
	 * half, mtu in the high half (historical ABI layout). */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
792
/* HCIGETDEVLIST ioctl backend: copy up to dev_num {id, flags} pairs for
 * the registered devices into the user buffer at @arg. The enumeration
 * also keeps auto-powered devices alive (hci_del_off_timer) and marks
 * non-mgmt devices pairable, mirroring hci_get_dev_info(). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel allocation stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device: cancel pending auto-off */
		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
838
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * the device named in @arg->dev_id and copy it back to userspace. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto-off */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the next */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
879
880/* ---- Interface to HCI drivers ---- */
881
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200882static int hci_rfkill_set_block(void *data, bool blocked)
883{
884 struct hci_dev *hdev = data;
885
886 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
887
888 if (!blocked)
889 return 0;
890
891 hci_dev_do_close(hdev);
892
893 return 0;
894}
895
/* rfkill operations table: only the block transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
899
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900/* Alloc HCI device */
901struct hci_dev *hci_alloc_dev(void)
902{
903 struct hci_dev *hdev;
904
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200905 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906 if (!hdev)
907 return NULL;
908
David Herrmann0ac7e702011-10-08 14:58:47 +0200909 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 skb_queue_head_init(&hdev->driver_init);
911
912 return hdev;
913}
914EXPORT_SYMBOL(hci_alloc_dev);
915
/* Free HCI device: drop queued driver-init frames and release the last
 * sysfs reference; the memory itself is freed by the device-core
 * release callback, not here. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
925
/* Deferred power-on work: bring the device up and, when it was powered
 * only for initial setup (HCI_AUTO_OFF), arm the timer that turns it
 * back off after AUTO_OFF_TIMEOUT. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on ends setup: announce the controller
	 * to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
942
943static void hci_power_off(struct work_struct *work)
944{
945 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
946
947 BT_DBG("%s", hdev->name);
948
949 hci_dev_close(hdev->id);
950}
951
952static void hci_auto_off(unsigned long data)
953{
954 struct hci_dev *hdev = (struct hci_dev *) data;
955
956 BT_DBG("%s", hdev->name);
957
958 clear_bit(HCI_AUTO_OFF, &hdev->flags);
959
960 queue_work(hdev->workqueue, &hdev->power_off);
961}
962
/* Cancel a pending automatic power-off (e.g. because userspace showed
 * interest in the device) and clear the corresponding flag. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
970
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200971int hci_uuids_clear(struct hci_dev *hdev)
972{
973 struct list_head *p, *n;
974
975 list_for_each_safe(p, n, &hdev->uuids) {
976 struct bt_uuid *uuid;
977
978 uuid = list_entry(p, struct bt_uuid, list);
979
980 list_del(p);
981 kfree(uuid);
982 }
983
984 return 0;
985}
986
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200987int hci_link_keys_clear(struct hci_dev *hdev)
988{
989 struct list_head *p, *n;
990
991 list_for_each_safe(p, n, &hdev->link_keys) {
992 struct link_key *key;
993
994 key = list_entry(p, struct link_key, list);
995
996 list_del(p);
997 kfree(key);
998 }
999
1000 return 0;
1001}
1002
1003struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001005 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001006
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001007 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001008 if (bacmp(bdaddr, &k->bdaddr) == 0)
1009 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001010
1011 return NULL;
1012}
1013
/* Decide whether a link key should be stored persistently. Returns 1
 * when the key must survive beyond the current connection, 0 when it
 * is only valid for this session. @old_key_type is 0xff when there was
 * no previous key for the address. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1049
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001050struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1051{
1052 struct link_key *k;
1053
1054 list_for_each_entry(k, &hdev->link_keys, list) {
1055 struct key_master_id *id;
1056
1057 if (k->type != HCI_LK_SMP_LTK)
1058 continue;
1059
1060 if (k->dlen != sizeof(*id))
1061 continue;
1062
1063 id = (void *) &k->data;
1064 if (id->ediv == ediv &&
1065 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1066 return k;
1067 }
1068
1069 return NULL;
1070}
1071EXPORT_SYMBOL(hci_find_ltk);
1072
1073struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1074 bdaddr_t *bdaddr, u8 type)
1075{
1076 struct link_key *k;
1077
1078 list_for_each_entry(k, &hdev->link_keys, list)
1079 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1080 return k;
1081
1082 return NULL;
1083}
1084EXPORT_SYMBOL(hci_find_link_key_type);
1085
/* Store (or update) the link key for bdaddr. @new_key indicates the
 * key was freshly generated by the controller (rather than replayed
 * from storage) and must be reported to the management interface.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address when there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the policy checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are only forwarded to userspace, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1140
/* Store (or update) an SMP Long Term Key for bdaddr. The EDIV/Rand
 * master identification is kept in the key's variable-length trailer.
 * Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Replace an existing LTK for this address when present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra room for the master id trailing the key struct */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* LE encryption key size is carried in the pin_len field */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1178
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001179int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180{
1181 struct link_key *key;
1182
1183 key = hci_find_link_key(hdev, bdaddr);
1184 if (!key)
1185 return -ENOENT;
1186
1187 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1188
1189 list_del(&key->list);
1190 kfree(key);
1191
1192 return 0;
1193}
1194
/* HCI command timer function: fires when the controller fails to
 * answer an outstanding command in time. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Restore one command credit so the command task can make
	 * progress instead of stalling forever. */
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1204
Szymon Janc2763eda2011-03-22 13:12:22 +01001205struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1206 bdaddr_t *bdaddr)
1207{
1208 struct oob_data *data;
1209
1210 list_for_each_entry(data, &hdev->remote_oob_data, list)
1211 if (bacmp(bdaddr, &data->bdaddr) == 0)
1212 return data;
1213
1214 return NULL;
1215}
1216
1217int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218{
1219 struct oob_data *data;
1220
1221 data = hci_find_remote_oob_data(hdev, bdaddr);
1222 if (!data)
1223 return -ENOENT;
1224
1225 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227 list_del(&data->list);
1228 kfree(data);
1229
1230 return 0;
1231}
1232
1233int hci_remote_oob_data_clear(struct hci_dev *hdev)
1234{
1235 struct oob_data *data, *n;
1236
1237 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1238 list_del(&data->list);
1239 kfree(data);
1240 }
1241
1242 return 0;
1243}
1244
1245int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1246 u8 *randomizer)
1247{
1248 struct oob_data *data;
1249
1250 data = hci_find_remote_oob_data(hdev, bdaddr);
1251
1252 if (!data) {
1253 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1254 if (!data)
1255 return -ENOMEM;
1256
1257 bacpy(&data->bdaddr, bdaddr);
1258 list_add(&data->list, &hdev->remote_oob_data);
1259 }
1260
1261 memcpy(data->hash, hash, sizeof(data->hash));
1262 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1263
1264 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1265
1266 return 0;
1267}
1268
Antti Julkub2a66aa2011-06-15 12:01:14 +03001269struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1270 bdaddr_t *bdaddr)
1271{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001272 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001273
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001274 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001275 if (bacmp(bdaddr, &b->bdaddr) == 0)
1276 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001277
1278 return NULL;
1279}
1280
1281int hci_blacklist_clear(struct hci_dev *hdev)
1282{
1283 struct list_head *p, *n;
1284
1285 list_for_each_safe(p, n, &hdev->blacklist) {
1286 struct bdaddr_list *b;
1287
1288 b = list_entry(p, struct bdaddr_list, list);
1289
1290 list_del(p);
1291 kfree(b);
1292 }
1293
1294 return 0;
1295}
1296
1297int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298{
1299 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001300
1301 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1302 return -EBADF;
1303
Antti Julku5e762442011-08-25 16:48:02 +03001304 if (hci_blacklist_lookup(hdev, bdaddr))
1305 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001306
1307 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001308 if (!entry)
1309 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001310
1311 bacpy(&entry->bdaddr, bdaddr);
1312
1313 list_add(&entry->list, &hdev->blacklist);
1314
Antti Julku5e762442011-08-25 16:48:02 +03001315 return mgmt_device_blocked(hdev->id, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001316}
1317
1318int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319{
1320 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001321
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001322 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
Antti Julku5e762442011-08-25 16:48:02 +03001323 return hci_blacklist_clear(hdev);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001324 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001325
1326 entry = hci_blacklist_lookup(hdev, bdaddr);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001327 if (!entry) {
Antti Julku5e762442011-08-25 16:48:02 +03001328 return -ENOENT;
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001329 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001330
1331 list_del(&entry->list);
1332 kfree(entry);
1333
Antti Julku5e762442011-08-25 16:48:02 +03001334 return mgmt_device_unblocked(hdev->id, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001335}
1336
/* adv_timer callback: expire the LE advertising report cache */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1347
Andre Guedes76c86862011-05-26 16:23:50 -03001348int hci_adv_entries_clear(struct hci_dev *hdev)
1349{
1350 struct adv_entry *entry, *tmp;
1351
1352 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1353 list_del(&entry->list);
1354 kfree(entry);
1355 }
1356
1357 BT_DBG("%s adv cache cleared", hdev->name);
1358
1359 return 0;
1360}
1361
1362struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363{
1364 struct adv_entry *entry;
1365
1366 list_for_each_entry(entry, &hdev->adv_entries, list)
1367 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1368 return entry;
1369
1370 return NULL;
1371}
1372
1373static inline int is_connectable_adv(u8 evt_type)
1374{
1375 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1376 return 1;
1377
1378 return 0;
1379}
1380
/* Cache the source address of a connectable LE advertising report.
 * Returns 0 on success (including the already-cached case), -EINVAL
 * for non-connectable events or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
			batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1408
/* Register HCI device: assign the first free index, initialize all
 * per-device state, create the workqueue/sysfs/rfkill resources and
 * kick off the deferred power-on. Returns the assigned id or a
 * negative error. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Keep the list sorted by id: insert right after "head" */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* The remaining setup may sleep, so it happens outside the lock */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failures are not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on for setup; hci_power_on arms the auto-off
	 * timer and finishes the HCI_SETUP phase */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1531
/* Unregister HCI device: unlink it, shut it down and release all
 * per-device resources in teardown-safe order. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new users can find it */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal when the index was announced in the
	 * first place (i.e. setup had completed) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Stop pending timers before destroying the workqueue their
	 * callbacks would queue work on */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Release all per-device caches and key/black lists */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1577
/* Suspend HCI device: notify registered listeners only */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1585
/* Resume HCI device: notify registered listeners only */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1593
/* Receive frame from HCI drivers: stamp it and queue it for the rx
 * tasklet. Consumes the skb in every case; returns -ENXIO when the
 * device is neither up nor initializing. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1617
/* Incrementally reassemble one HCI packet of the given @type from a
 * (possibly partial) byte stream. @index selects the reassembly slot
 * in hdev->reassembly. Returns the number of input bytes left
 * unconsumed, or a negative error. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest buffer the
		 * packet type may need and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track the remaining expected byte count in the skb's
		 * control buffer */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than the header/payload still expects */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, take the payload length
		 * from it; drop the packet if it would not fit */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1726
Marcel Holtmannef222012007-07-11 06:42:04 +02001727int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1728{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301729 int rem = 0;
1730
Marcel Holtmannef222012007-07-11 06:42:04 +02001731 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1732 return -EILSEQ;
1733
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001734 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001735 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301736 if (rem < 0)
1737 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001738
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301739 data += (count - rem);
1740 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001741 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001742
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301743 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001744}
1745EXPORT_SYMBOL(hci_recv_fragment);
1746
/* Reassembly slot reserved for untyped stream input */
#define STREAM_REASSEMBLY 0

/* Feed raw H4-style stream data (a packet-type byte followed by the
 * packet itself) into the shared stream reassembly slot. Returns the
 * remaining byte count or a negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1781
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782/* ---- Interface to upper protocols ---- */
1783
1784/* Register/Unregister protocols.
1785 * hci_task_lock is used to ensure that no tasks are running. */
1786int hci_register_proto(struct hci_proto *hp)
1787{
1788 int err = 0;
1789
1790 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1791
1792 if (hp->id >= HCI_MAX_PROTO)
1793 return -EINVAL;
1794
1795 write_lock_bh(&hci_task_lock);
1796
1797 if (!hci_proto[hp->id])
1798 hci_proto[hp->id] = hp;
1799 else
1800 err = -EEXIST;
1801
1802 write_unlock_bh(&hci_task_lock);
1803
1804 return err;
1805}
1806EXPORT_SYMBOL(hci_register_proto);
1807
1808int hci_unregister_proto(struct hci_proto *hp)
1809{
1810 int err = 0;
1811
1812 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1813
1814 if (hp->id >= HCI_MAX_PROTO)
1815 return -EINVAL;
1816
1817 write_lock_bh(&hci_task_lock);
1818
1819 if (hci_proto[hp->id])
1820 hci_proto[hp->id] = NULL;
1821 else
1822 err = -ENOENT;
1823
1824 write_unlock_bh(&hci_task_lock);
1825
1826 return err;
1827}
1828EXPORT_SYMBOL(hci_unregister_proto);
1829
/* Add a callback structure to the global HCI callback list.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1841
/* Remove a callback structure from the global HCI callback list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1853
/* Hand one frame to the driver for transmission, mirroring it to raw
 * sockets when the device is in promiscuous mode. Consumes the skb. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1877
/* Send HCI command: build the command packet (header plus @plen bytes
 * of @param) and queue it for the command tasklet. Returns 0 or
 * -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can resume after its completion event */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
1914/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001915void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
1917 struct hci_command_hdr *hdr;
1918
1919 if (!hdev->sent_cmd)
1920 return NULL;
1921
1922 hdr = (void *) hdev->sent_cmd->data;
1923
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001924 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925 return NULL;
1926
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001927 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
1929 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1930}
1931
1932/* Send ACL data */
1933static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1934{
1935 struct hci_acl_hdr *hdr;
1936 int len = skb->len;
1937
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001938 skb_push(skb, HCI_ACL_HDR_SIZE);
1939 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001940 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001941 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1942 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
/* Queue an (already headered) ACL skb, plus any fragments hanging off
 * its frag_list, onto the given channel queue.  The first fragment keeps
 * the caller's flags; continuation fragments are re-flagged ACL_CONT and
 * get their own ACL header here.  All fragments are enqueued atomically
 * under the queue lock so the scheduler never sees a partial PDU.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Subsequent fragments are continuations, not starts */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
1985
/* Send ACL data on a channel: tag the skb with the device and packet
 * type, prepend the ACL header, queue it (with fragments) on the
 * channel's data queue and kick the TX tasklet.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2002
/* Send SCO data */
/* Prepend a SCO header (handle + length), queue the skb on the
 * connection's data queue and kick the TX tasklet.
 * NOTE(review): hdr.dlen is __u8-sized per the SCO header layout, so the
 * caller is presumably expected to keep skb->len within one SCO packet —
 * no check is done here.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2025
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data that has
 * sent the least so far (fair scheduling).  *quote receives that
 * connection's share of the free controller buffers (at least 1), or 0
 * when nothing is eligible.  Returns the chosen connection or NULL.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	/* min = ~0: compares as the maximum against the unsigned c->sent */
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Pick the buffer-credit pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2082
/* Link TX timeout: the controller has not returned buffer credits for
 * too long.  Disconnect every connection of the given type that still
 * has unacknowledged packets outstanding.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: presumably the "Remote User Terminated
			 * Connection" HCI reason code — TODO confirm against
			 * the Bluetooth core spec error-code table */
			hci_acl_disconn(c, 0x13);
		}
	}
}
2099
/* Channel scheduler: choose the hci_chan of the given link type that
 * should transmit next.  Among the channels whose head skb carries the
 * highest priority currently queued, the one on the connection that has
 * sent the least (lowest conn->sent) wins.  *quote is set to the
 * winner's fair share of the free controller buffers (at least 1).
 * Returns NULL when no eligible channel has data queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	/* min = ~0 compares as maximum against the unsigned sent counter */
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters: frames on a
			 * channel must be sent in order */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Found a higher priority level: restart the
				 * fairness bookkeeping at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Pick the buffer-credit pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE pool (le_mtu == 0): share ACL credits */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2177
/* Anti-starvation pass, run after a TX round that actually sent data.
 * Channels that transmitted this round get their per-round counter
 * reset; channels that were skipped get the priority of their head skb
 * promoted to HCI_PRIO_MAX - 1 so they compete at the top level in the
 * next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel got served this round: just reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type visited — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2224
/* Schedule ACL traffic: drain per-channel queues into the driver while
 * controller buffer credits (hdev->acl_cnt) last, honoring the
 * channel/priority selection done by hci_chan_sent().  Also detects a
 * stalled link (no credits returned for 45s) and runs the
 * anti-starvation pass when anything was sent.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember starting credit count to detect whether we sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2274
/* Schedule SCO */
/* Drain SCO connection queues into the driver while SCO buffer credits
 * last, serving connections via the fair hci_low_sent() scheduler.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the (unsigned) sent counter at its maximum */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2298
/* Schedule eSCO: identical to hci_sched_sco() but for ESCO_LINK
 * connections; shares the SCO buffer-credit pool (hdev->sco_cnt).
 */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the (unsigned) sent counter at its maximum */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2321
/* Schedule LE traffic, mirroring hci_sched_acl(): drain per-channel
 * queues while credits last.  Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) borrow ACL credits, so the consumed count is
 * written back to whichever pool was used.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool if present, otherwise share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember starting count to detect whether anything was sent */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2372
/* TX tasklet body: runs all per-link-type schedulers and then flushes
 * any raw (unknown type) packets.  hci_task_lock is taken for reading so
 * protocol (un)registration, which takes it for writing, is excluded
 * while packets are in flight.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2399
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Strip the ACL header, look up the connection by handle and pass the
 * payload to the L2CAP protocol handler.  Consumes the skb: ownership
 * passes to hp->recv_acldata() on success, otherwise it is freed here.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2441
/* SCO data packet */
/* Strip the SCO header, look up the connection by handle and pass the
 * payload to the SCO protocol handler.  Consumes the skb: ownership
 * passes to hp->recv_scodata() on success, otherwise it is freed here.
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2477
/* RX tasklet body: dispatch each queued incoming frame by packet type.
 * In promiscuous mode a copy goes to monitoring sockets first; in RAW
 * mode frames are dropped here (userspace handles them); during INIT
 * data packets are discarded and only events are processed.
 */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type — drop it */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2532
/* Command tasklet body: if the controller has a free command credit,
 * dequeue the next command, keep a clone in hdev->sent_cmd (so the
 * completion handler can inspect it via hci_sent_cmd_data()) and send
 * it, arming the command timeout.  If cloning fails, requeue the command
 * at the head and reschedule ourselves to retry.
 */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command (kfree_skb(NULL)
		 * is a no-op) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002563
2564int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2565{
2566 /* General inquiry access code (GIAC) */
2567 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2568 struct hci_cp_inquiry cp;
2569
2570 BT_DBG("%s", hdev->name);
2571
2572 if (test_bit(HCI_INQUIRY, &hdev->flags))
2573 return -EINPROGRESS;
2574
2575 memset(&cp, 0, sizeof(cp));
2576 memcpy(&cp.lap, lap, sizeof(cp.lap));
2577 cp.length = length;
2578
2579 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2580}