blob: 2da3f907e9b7130f49a53e4089153a2a8380845f [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
63/* HCI device list */
64LIST_HEAD(hci_dev_list);
65DEFINE_RWLOCK(hci_dev_list_lock);
66
67/* HCI callback list */
68LIST_HEAD(hci_cb_list);
69DEFINE_RWLOCK(hci_cb_list_lock);
70
71/* HCI protocols */
72#define HCI_MAX_PROTO 2
73struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080076static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/* ---- HCI notifications ---- */
79
/* Add @nb to the chain notified of HCI device events (up/down/reg/unreg). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
84
/* Remove @nb from the HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
89
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) for @hdev to all
 * registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
94
95/* ---- HCI requests ---- */
96
/* Complete a pending synchronous request: record @result and wake the
 * waiter sleeping in __hci_request(). Called from event processing when
 * command @cmd finishes. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only wake up if someone is actually waiting on this request. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
/* Abort a pending synchronous request with error @err (positive errno)
 * and wake the waiter; __hci_request() will return -@err. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
124
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps up to @timeout jiffies
 * until hci_req_complete()/hci_req_cancel() wakes us. Returns 0 on success,
 * a negative errno mapped from the HCI status, -EINTR on signal or
 * -ETIMEDOUT. Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Enqueue ourselves BEFORE issuing the request so a fast completion
	 * cannot be missed; the wakeup races only with schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): returning here leaves req_status at HCI_REQ_PEND;
	 * presumably a later completion is then absorbed harmlessly by the
	 * req_status check in hci_req_complete() — confirm. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: translate HCI status to -errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
/* Public wrapper around __hci_request(): rejects requests while the
 * device is down and serializes concurrent requests via the req lock. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
183
/* Request callback: issue an HCI_Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Request callback: full controller bring-up sequence. Flushes any
 * driver-queued init commands first, then issues the mandatory and
 * optional HCI setup commands. Runs with HCI_INIT set; completion is
 * tracked per-command via hdev->init_last_cmd. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open() completed. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot survive it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys so stale pairings don't linger. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* Request callback: LE-specific init — query the LE ACL buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
277
/* Request callback: set inquiry/page scan mode; @opt is the scan byte. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
287
/* Request callback: enable/disable authentication; @opt is the flag byte. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
297
/* Request callback: enable/disable link-level encryption mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
307
/* Request callback: set the default link policy (role switch, hold, etc.). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
317
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			/* Take a reference while still under the list lock. */
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
/* Insert or refresh the inquiry-cache entry for @data->bdaddr and stamp
 * both the entry and the cache with the current jiffies. Allocation is
 * GFP_ATOMIC since this runs from event processing; on failure the
 * update is silently dropped. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the singly-linked cache list. */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
390
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep — callers hold the device lock (see hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
412
/* Request callback: start an inquiry using the parameters passed via
 * @opt (a pointer to struct hci_inquiry_req). No-op if an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
429
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale, empty, or a flush was requested), then copy cached
 * results back to user space following the updated hci_inquiry_req
 * header. Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units; budget ~2s of jiffies per unit. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp), then the entries. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
495
496/* ---- HCI ioctl helpers ---- */
497
/* Bring up HCI device @dev: open the transport, run the HCI init
 * sequences (unless the device is raw), and on success mark it HCI_UP
 * and notify listeners. On init failure the transport is fully torn
 * down again. Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Caller already holds the req lock, so use __hci_request. */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report power-on during initial controller setup. */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Shut down @hdev: cancel pending requests, stop tasklets, flush
 * caches/queues, reset the controller (unless raw) and close the
 * transport. Safe to call on an already-down device (returns 0).
 * Drops the reference taken when the device came up. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	/* Stop a pending discoverable-timeout countdown. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work_sync(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Balances the hci_dev_hold() taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
652
653int hci_dev_close(__u16 dev)
654{
655 struct hci_dev *hdev;
656 int err;
657
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200658 hdev = hci_dev_get(dev);
659 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 return -ENODEV;
661 err = hci_dev_do_close(hdev);
662 hci_dev_put(hdev);
663 return err;
664}
665
/* HCIDEVRESET ioctl backend: drop all queued traffic and cached state
 * for device @dev and issue an HCI_Reset (unless the device is raw).
 * A no-op success if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiet while we purge its queues. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters: one command slot, no data credits. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
706
707int hci_dev_reset_stat(__u16 dev)
708{
709 struct hci_dev *hdev;
710 int ret = 0;
711
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200712 hdev = hci_dev_get(dev);
713 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714 return -ENODEV;
715
716 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
717
718 hci_dev_put(hdev);
719
720 return ret;
721}
722
/* Dispatch the HCISET* ioctls: copy the request from user space, look
 * up the device, and either run the matching HCI request or update the
 * in-kernel setting directly. Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two __u16s: high half = MTU, low half = pkt count. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
797
/* HCIGETDEVLIST ioctl backend: fill a user-supplied hci_dev_list_req
 * with up to dev_num (id, flags) pairs, one per registered device.
 * Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation so user space can't request huge buffers. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) access cancels auto-off and implies
		 * the pairable mode of the old API. */
		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the header plus the entries actually filled in. */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
843
/* HCIGETDEVINFO ioctl backend: copy a snapshot of one device's
 * identity, flags, MTUs, policy and statistics to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy access cancels auto-off; see hci_get_dev_list(). */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type. */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
884
885/* ---- Interface to HCI drivers ---- */
886
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200887static int hci_rfkill_set_block(void *data, bool blocked)
888{
889 struct hci_dev *hdev = data;
890
891 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
892
893 if (!blocked)
894 return 0;
895
896 hci_dev_do_close(hdev);
897
898 return 0;
899}
900
/* rfkill operations: only the block/unblock hook is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
904
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* kzalloc: every field starts out zeroed */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Set up the embedded struct device; the memory is later freed
	 * through its release callback (see hci_free_dev) */
	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
920
/* Free HCI device: drop any queued driver-init frames and release the
 * sysfs reference taken in hci_alloc_dev() */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
930
/* Deferred power-on work: bring the adapter up and, while still inside
 * the auto-off window, arm the timer that powers it back down if user
 * space never claims it. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on ends the setup phase: tell mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
947
/* Deferred power-off work, queued from the auto-off timer */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
956
/* Auto-off timer: runs in timer (softirq) context, so the actual close
 * — which may sleep — is pushed onto the device workqueue */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
967
/* Cancel a pending auto power-off (user space has taken the device) */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
975
/* Delayed work: the discoverable timeout expired, so turn inquiry scan
 * back off (page scan stays enabled — SCAN_PAGE) */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
993
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200994int hci_uuids_clear(struct hci_dev *hdev)
995{
996 struct list_head *p, *n;
997
998 list_for_each_safe(p, n, &hdev->uuids) {
999 struct bt_uuid *uuid;
1000
1001 uuid = list_entry(p, struct bt_uuid, list);
1002
1003 list_del(p);
1004 kfree(uuid);
1005 }
1006
1007 return 0;
1008}
1009
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001010int hci_link_keys_clear(struct hci_dev *hdev)
1011{
1012 struct list_head *p, *n;
1013
1014 list_for_each_safe(p, n, &hdev->link_keys) {
1015 struct link_key *key;
1016
1017 key = list_entry(p, struct link_key, list);
1018
1019 list_del(p);
1020 kfree(key);
1021 }
1022
1023 return 0;
1024}
1025
/* Look up the stored link key for bdaddr; NULL if none.
 * Caller must hold the device lock. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
1036
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001037static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1038 u8 key_type, u8 old_key_type)
1039{
1040 /* Legacy key */
1041 if (key_type < 0x03)
1042 return 1;
1043
1044 /* Debug keys are insecure so don't store them persistently */
1045 if (key_type == HCI_LK_DEBUG_COMBINATION)
1046 return 0;
1047
1048 /* Changed combination key and there's no previous one */
1049 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1050 return 0;
1051
1052 /* Security mode 3 case */
1053 if (!conn)
1054 return 1;
1055
1056 /* Neither local nor remote side had no-bonding as requirement */
1057 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1058 return 1;
1059
1060 /* Local side had dedicated bonding as requirement */
1061 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1062 return 1;
1063
1064 /* Remote side had dedicated bonding as requirement */
1065 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1066 return 1;
1067
1068 /* If none of the above criteria match, then don't store the key
1069 * persistently */
1070 return 0;
1071}
1072
/* Look up an SMP Long Term Key by its EDiv/Rand identifiers.
 * LTKs live on the regular link_keys list with type HCI_LK_SMP_LTK and
 * a struct key_master_id in the variable-length data area. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Entry must carry exactly a key_master_id payload */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1095
/* Look up a stored key of the given type for bdaddr; NULL if none */
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
1108
/* Store (or update) the link key for bdaddr.
 *
 * new_key is non-zero when the key was just generated by the controller
 * (as opposed to being loaded from storage); only then is user space
 * notified and the persistency policy applied.
 * Returns 0 or -ENOMEM. Caller must hold the device lock.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff acts as the "no previous key" marker */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys live only as long as the connection */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1163
/* Store (or update) an SMP Long Term Key for bdaddr.
 * Returns 0 or -ENOMEM. Caller must hold the device lock. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra room for the trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;	/* no previous key */
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;	/* pin_len doubles as encryption key size */

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): in hci_add_link_key() the third argument of
	 * mgmt_new_key() is the "persistent" flag, but old_key_type is
	 * passed here — confirm this is intentional. */
	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1201
/* Delete the stored link key for bdaddr; -ENOENT if there is none.
 * Caller must hold the device lock. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1217
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never acknowledged the last command: force the
	 * credit back to 1 so the command queue can make progress again */
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1227
/* Look up stored out-of-band pairing data for bdaddr; NULL if none */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1239
/* Delete stored OOB data for bdaddr; -ENOENT if there is none */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1255
/* Free all stored remote OOB data. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1267
/* Store (or refresh) the OOB hash/randomizer pair for bdaddr.
 * Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* First data for this address: create a new entry */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1291
/* Look up bdaddr on the reject ("blacklist") list; NULL if absent */
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
1303
1304int hci_blacklist_clear(struct hci_dev *hdev)
1305{
1306 struct list_head *p, *n;
1307
1308 list_for_each_safe(p, n, &hdev->blacklist) {
1309 struct bdaddr_list *b;
1310
1311 b = list_entry(p, struct bdaddr_list, list);
1312
1313 list_del(p);
1314 kfree(b);
1315 }
1316
1317 return 0;
1318}
1319
/* Add bdaddr to the reject list and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST for duplicates, -ENOMEM on
 * allocation failure, otherwise the mgmt notification result. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev->id, bdaddr);
}
1340
1341int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1342{
1343 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001344
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001345 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
Antti Julku5e762442011-08-25 16:48:02 +03001346 return hci_blacklist_clear(hdev);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001347 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001348
1349 entry = hci_blacklist_lookup(hdev, bdaddr);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001350 if (!entry) {
Antti Julku5e762442011-08-25 16:48:02 +03001351 return -ENOENT;
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001352 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001353
1354 list_del(&entry->list);
1355 kfree(entry);
1356
Antti Julku5e762442011-08-25 16:48:02 +03001357 return mgmt_device_unblocked(hdev->id, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001358}
1359
/* Adv cache timer: flush all cached LE advertising entries */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1370
/* Free all cached LE advertising entries. Always returns 0.
 * Caller must hold the device lock. */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1384
/* Look up a cached advertising entry by address; NULL if none */
struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}
1395
1396static inline int is_connectable_adv(u8 evt_type)
1397{
1398 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1399 return 1;
1400
1401 return 0;
1402}
1403
/* Cache a connectable LE advertising report so a later connect request
 * can recover the remote address type. Returns 0 on success (or if the
 * entry already exists), -EINVAL for non-connectable events, -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1431
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the basic transport callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; head ends up at the insertion
	 * point that keeps the list sorted by id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Tasklets drive the cmd/rx/tx paths */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal; the device just runs without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in the setup phase and are powered on
	 * asynchronously, with an auto-off window if unclaimed */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1556
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-assembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt, unless the device never finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Stop the remaining timers before tearing down state */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the registration reference taken in hci_register_dev() */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1602
/* Suspend HCI device: notify listeners only */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1610
/* Resume HCI device: notify listeners only */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1618
/* Receive frame from HCI drivers.
 * Takes ownership of skb; frees it and returns -ENXIO when the device
 * is neither up nor initializing. Otherwise the frame is timestamped
 * and queued for the rx tasklet. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1642
/* Reassemble a (possibly fragmented) HCI packet.
 *
 * Appends up to count bytes of data to the partial packet held in
 * hdev->reassembly[index], allocating a worst-case sized skb on first
 * use. Once the packet header is complete, the expected payload length
 * is read from it. A finished packet is handed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * error: -EILSEQ for an invalid type/index, -ENOMEM on allocation
 * failure or an over-long payload.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate for the worst case and
		 * expect just the header for now */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * and make sure it fits the allocation */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1751
/* Feed driver data of a known packet type into the per-type reassembly
 * slot (slot = type - 1). Returns the count left unconsumed or a
 * negative error from hci_reassembly(). */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1771
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (e.g. from a UART transport) where each
 * packet is prefixed with its one-byte HCI packet type. A single
 * shared reassembly slot is used. Returns the count left unconsumed
 * or a negative error from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1806
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807/* ---- Interface to upper protocols ---- */
1808
1809/* Register/Unregister protocols.
1810 * hci_task_lock is used to ensure that no tasks are running. */
/* Register an upper-layer protocol in its fixed slot.
 * Returns -EINVAL for a bad id, -EEXIST if the slot is taken. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* Exclude the rx/tx tasklets while swapping the slot */
	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1832
/* Remove an upper-layer protocol from its slot.
 * Returns -EINVAL for a bad id, -ENOENT if the slot was empty. */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	/* Exclude the rx/tx tasklets while clearing the slot */
	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1854
/* Add a callback set to the global notification list. Always 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1866
/* Remove a callback set from the global notification list. Always 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1878
/* Hand one outgoing frame to the driver. Consumes the skb.
 * In promiscuous mode a timestamped copy also goes to monitoring
 * sockets before transmission. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1902
1903/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001904int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905{
1906 int len = HCI_COMMAND_HDR_SIZE + plen;
1907 struct hci_command_hdr *hdr;
1908 struct sk_buff *skb;
1909
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001910 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
1912 skb = bt_skb_alloc(len, GFP_ATOMIC);
1913 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001914 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 return -ENOMEM;
1916 }
1917
1918 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001919 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 hdr->plen = plen;
1921
1922 if (plen)
1923 memcpy(skb_put(skb, plen), param, plen);
1924
1925 BT_DBG("skb len %d", skb->len);
1926
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001927 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001929
Johan Hedberga5040ef2011-01-10 13:28:59 +02001930 if (test_bit(HCI_INIT, &hdev->flags))
1931 hdev->init_last_cmd = opcode;
1932
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001934 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936 return 0;
1937}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001940void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941{
1942 struct hci_command_hdr *hdr;
1943
1944 if (!hdev->sent_cmd)
1945 return NULL;
1946
1947 hdr = (void *) hdev->sent_cmd->data;
1948
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001949 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 return NULL;
1951
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001952 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
1954 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1955}
1956
1957/* Send ACL data */
1958static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1959{
1960 struct hci_acl_hdr *hdr;
1961 int len = skb->len;
1962
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001963 skb_push(skb, HCI_ACL_HDR_SIZE);
1964 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001965 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001966 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1967 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968}
1969
/* Queue a (possibly fragmented) ACL skb on a channel queue.
 *
 * The head skb already carries its ACL header (added by the caller,
 * see hci_send_acl()).  Continuation fragments hanging off frag_list
 * get an ACL_CONT header here.  All fragments of one PDU are queued
 * under the queue lock so the TX task never sees a partial PDU.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragments; they are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Fragments after the head are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2010
/* Send ACL data on a channel: tag the skb with device and packet type,
 * prepend the ACL header, queue it (fragment-aware) on the channel's
 * data queue and kick the TX tasklet. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2027
/* Send SCO data: prepend the SCO header (the length is read before the
 * push so it covers only the payload), queue on the connection's data
 * queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	/* NOTE(review): dlen is one byte on the wire; assumes callers
	 * never pass a payload larger than 255 bytes — confirm. */
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2050
2051/* ---- HCI TX task (outgoing data) ---- */
2052
/* HCI Connection scheduler */
/* Pick the connection of the given link type that has queued data and
 * the fewest outstanding packets (fairness), and compute its TX quota
 * as an equal share of the free controller buffers for that type. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the least in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Select the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Equal share per active connection, at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2107
Ville Tervobae1f5d2011-02-10 22:38:53 -03002108static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
2110 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002111 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
Ville Tervobae1f5d2011-02-10 22:38:53 -03002113 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
2115 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002116 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d2011-02-10 22:38:53 -03002117 if (c->type == type && c->sent) {
2118 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 hdev->name, batostr(&c->dst));
2120 hci_acl_disconn(c, 0x13);
2121 }
2122 }
2123}
2124
/* Channel scheduler: among connections of @type, find the channel whose
 * head-of-queue skb has the highest priority; ties are broken by the
 * lowest per-connection 'sent' count.  Computes a TX quota for the
 * winner from the matching controller buffer pool. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head of each queue competes */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New highest priority: restart the
				 * fairness bookkeeping at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on least-sent connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Share the buffers among same-priority channels, at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2202
/* Anti-starvation pass, run after a TX round that sent something: for
 * every channel of @type that sent nothing this round but still has
 * data queued, promote its head skb to just below the top priority so
 * it can win the next scheduling pass.  Channels that did send only
 * get their per-round 'sent' counter cleared. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: reset counter
			 * and leave its priorities untouched */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2249
/* Schedule ACL traffic: detect TX stalls, then repeatedly pick the
 * best channel (highest head priority, least sent) and transmit up to
 * its quota, stopping early when a lower-priority frame reaches the
 * head of the queue.  Finally rebalance priorities if anything went
 * out. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the buffer count to detect whether anything was sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2299
2300/* Schedule SCO */
2301static inline void hci_sched_sco(struct hci_dev *hdev)
2302{
2303 struct hci_conn *conn;
2304 struct sk_buff *skb;
2305 int quote;
2306
2307 BT_DBG("%s", hdev->name);
2308
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002309 if (!hci_conn_num(hdev, SCO_LINK))
2310 return;
2311
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2313 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2314 BT_DBG("skb %p len %d", skb, skb->len);
2315 hci_send_frame(skb);
2316
2317 conn->sent++;
2318 if (conn->sent == ~0)
2319 conn->sent = 0;
2320 }
2321 }
2322}
2323
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002324static inline void hci_sched_esco(struct hci_dev *hdev)
2325{
2326 struct hci_conn *conn;
2327 struct sk_buff *skb;
2328 int quote;
2329
2330 BT_DBG("%s", hdev->name);
2331
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002332 if (!hci_conn_num(hdev, ESCO_LINK))
2333 return;
2334
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002335 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2336 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2337 BT_DBG("skb %p len %d", skb, skb->len);
2338 hci_send_frame(skb);
2339
2340 conn->sent++;
2341 if (conn->sent == ~0)
2342 conn->sent = 0;
2343 }
2344 }
2345}
2346
/* Schedule LE traffic.  LE borrows the ACL buffer pool when the
 * controller reports no dedicated LE buffers (le_pkts == 0); the
 * remaining budget is written back to whichever pool was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt; /* starting budget, to detect whether anything was sent */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2397
/* TX tasklet: run the per-link-type schedulers, then flush raw
 * (unknown type) packets.  Holds hci_task_lock for reading, which
 * excludes protocol register/unregister while frames are in flight. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2424
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002425/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426
2427/* ACL data packet */
2428static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2429{
2430 struct hci_acl_hdr *hdr = (void *) skb->data;
2431 struct hci_conn *conn;
2432 __u16 handle, flags;
2433
2434 skb_pull(skb, HCI_ACL_HDR_SIZE);
2435
2436 handle = __le16_to_cpu(hdr->handle);
2437 flags = hci_flags(handle);
2438 handle = hci_handle(handle);
2439
2440 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2441
2442 hdev->stat.acl_rx++;
2443
2444 hci_dev_lock(hdev);
2445 conn = hci_conn_hash_lookup_handle(hdev, handle);
2446 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002447
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448 if (conn) {
2449 register struct hci_proto *hp;
2450
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -07002451 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002452
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002454 hp = hci_proto[HCI_PROTO_L2CAP];
2455 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 hp->recv_acldata(conn, skb, flags);
2457 return;
2458 }
2459 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002460 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 hdev->name, handle);
2462 }
2463
2464 kfree_skb(skb);
2465}
2466
2467/* SCO data packet */
2468static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2469{
2470 struct hci_sco_hdr *hdr = (void *) skb->data;
2471 struct hci_conn *conn;
2472 __u16 handle;
2473
2474 skb_pull(skb, HCI_SCO_HDR_SIZE);
2475
2476 handle = __le16_to_cpu(hdr->handle);
2477
2478 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2479
2480 hdev->stat.sco_rx++;
2481
2482 hci_dev_lock(hdev);
2483 conn = hci_conn_hash_lookup_handle(hdev, handle);
2484 hci_dev_unlock(hdev);
2485
2486 if (conn) {
2487 register struct hci_proto *hp;
2488
2489 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002490 hp = hci_proto[HCI_PROTO_SCO];
2491 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 hp->recv_scodata(conn, skb);
2493 return;
2494 }
2495 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002496 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 hdev->name, handle);
2498 }
2499
2500 kfree_skb(skb);
2501}
2502
/* RX tasklet: dispatch frames queued by the driver to the event
 * handler, the ACL/SCO handlers, or raw sockets.  Holds hci_task_lock
 * for reading to exclude protocol register/unregister. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw-only device: nothing more to do with the frame */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2557
/* Command tasklet: transmit the next queued HCI command when the
 * controller has a free command slot (cmd_cnt > 0).  A clone of the
 * sent command is kept in hdev->sent_cmd so replies can be matched. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002588
2589int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2590{
2591 /* General inquiry access code (GIAC) */
2592 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2593 struct hci_cp_inquiry cp;
2594
2595 BT_DBG("%s", hdev->name);
2596
2597 if (test_bit(HCI_INQUIRY, &hdev->flags))
2598 return -EINPROGRESS;
2599
2600 memset(&cp, 0, sizeof(cp));
2601 memcpy(&cp.lap, lap, sizeof(cp.lap));
2602 cp.length = length;
2603
2604 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2605}
Andre Guedes023d5042011-11-04 14:16:52 -03002606
/* Abort a running inquiry.  Returns -EPERM when no inquiry is active,
 * otherwise the result of sending HCI_OP_INQUIRY_CANCEL. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}