blob: 19e44533fb01223466c20449a5b89754c9526036 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
63/* HCI device list */
64LIST_HEAD(hci_dev_list);
65DEFINE_RWLOCK(hci_dev_list_lock);
66
67/* HCI callback list */
68LIST_HEAD(hci_cb_list);
69DEFINE_RWLOCK(hci_cb_list_lock);
70
71/* HCI protocols */
72#define HCI_MAX_PROTO 2
73struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080076static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/* ---- HCI notifications ---- */
79
/* Register a callback on the global HCI event notifier chain.
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
84
/* Remove a previously registered callback from the HCI notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
89
/* Broadcast an HCI device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to all
 * listeners on the notifier chain; hdev is passed as the payload. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
94
95/* ---- HCI requests ---- */
96
/* Complete a pending synchronous HCI request.
 * @cmd:    opcode of the command that just completed
 * @result: HCI status byte reported by the controller
 * Wakes up the waiter sleeping in __hci_request(), if any. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only transition PEND -> DONE; other states mean no one is waiting. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
/* Abort a pending synchronous HCI request with the given (positive) errno.
 * The waiter in __hci_request() will return -err. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
124
125/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 * @req:     callback that queues the HCI command(s); runs synchronously here
 * @opt:     opaque argument forwarded to @req
 * @timeout: maximum wait, in jiffies
 * Returns 0 on success, a negative errno on failure/cancel, -ETIMEDOUT if
 * the controller never answered, or -EINTR if a signal arrived.
 * Caller must hold the request lock (see hci_request()). */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Mark ourselves interruptible BEFORE issuing the request so a
	 * completion between req() and schedule_timeout() is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as-is here; presumably
	 * hci_req_complete()/cancel clean it up later — confirm. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map HCI status byte to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno from the canceller. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
/* Serialized wrapper around __hci_request(): refuses requests while the
 * device is down and takes the per-device request lock so only one
 * synchronous request runs at a time. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
183
/* Request callback: issue HCI_Reset to the controller.
 * HCI_RESET flag marks the reset as in-flight for the event handlers. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Request callback: queue the full BR/EDR controller bring-up sequence.
 * Runs under HCI_INIT; completion is tracked per-command via
 * hdev->init_last_cmd in hci_req_complete(). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: vendor/driver supplied packets queued before
	 * the standard init sequence. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset — skipped for controllers that declare the no-reset quirk. */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots). */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Flush all stored link keys from the controller. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* Request callback: LE-specific bring-up — query the LE ACL buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
277
/* Request callback: set inquiry/page scan enable bits (opt is the raw
 * Scan_Enable value from HCISETSCAN). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
287
/* Request callback: enable/disable authentication (opt is the raw
 * Authentication_Enable value). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
297
/* Request callback: enable/disable link-level encryption (opt is the raw
 * Encryption_Mode value). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
307
/* Request callback: set the controller's default link policy
 * (opt is the host-order policy bitmask, converted to LE on the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
317
/* Get HCI device by index.
 * Device is held on return (caller must hci_dev_put());
 * returns NULL if the index is negative or not registered. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; the reference is
	 * taken before dropping the lock so the device cannot disappear. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
/* Insert or refresh an inquiry result in the cache.
 * Existing entries for the same address are overwritten in place;
 * new entries are pushed at the head of the singly-linked list.
 * Allocation failure is silently ignored (best-effort cache). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	/* Refresh payload and timestamps on both the entry and the cache. */
	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
/* Request callback: start an inquiry with the parameters packed into
 * @opt (a struct hci_inquiry_req *). No-op if an inquiry is already
 * in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
429
/* HCIINQUIRY ioctl backend: run (or reuse a cached) inquiry and copy the
 * results back to user space.
 * @arg: user pointer to a struct hci_inquiry_req, followed on output by
 *       the inquiry_info array.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a fresh inquiry if the cache is stale/empty or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28 s units per spec; ~2000 ms per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the header back first, then the result array right after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
495
496/* ---- HCI ioctl helpers ---- */
497
/* Bring an HCI device up: open the transport, run the controller init
 * sequence (unless raw), and announce HCI_DEV_UP.
 * @dev: device index. Returns 0 or a negative errno
 * (-ERFKILL if blocked, -EALREADY if already up, -EIO on open failure). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): a successful LE init overwrites a BR/EDR
		 * init failure in ret — confirm this is intended. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Keep a reference for the lifetime of the UP state. */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup: stop tasklets, drop queued traffic
		 * and close the transport again. */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Tear an HCI device down: cancel pending requests, stop tasklets, flush
 * all state, reset the controller (unless raw) and close the transport.
 * Idempotent — returns 0 immediately if the device is already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	/* Drop cached inquiry results and all connections. */
	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Release the reference taken by hci_dev_open() on success. */
	hci_dev_put(hdev);
	return 0;
}
647
/* HCIDEVDOWN ioctl backend: look up the device by index and close it. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
660
/* HCIDEVRESET ioctl backend: flush queues, caches and connections, zero
 * the flow-control counters and (unless raw) issue HCI_Reset.
 * Silently succeeds if the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet quiescent while state is being torn down. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset controller flow-control bookkeeping. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
701
702int hci_dev_reset_stat(__u16 dev)
703{
704 struct hci_dev *hdev;
705 int ret = 0;
706
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200707 hdev = hci_dev_get(dev);
708 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 return -ENODEV;
710
711 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
712
713 hci_dev_put(hdev);
714
715 return ret;
716}
717
/* Dispatcher for the HCISET* device-configuration ioctls.
 * @cmd: ioctl number; @arg: user pointer to a struct hci_dev_req.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	/* The remaining commands only update host-side state. */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs MTU in the high half and packet count in the low. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
792
/* HCIGETDEVLIST ioctl backend: copy up to dev_num {id, flags} pairs for
 * all registered devices to user space.
 * @arg: user pointer to struct hci_dev_list_req (dev_num read first). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to roughly two pages. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device: cancel any pending auto-off. */
		hci_del_off_timer(hdev);

		/* Legacy (non-mgmt) userspace expects pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
838
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * the device whose id userspace supplied and copy it back. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel any pending auto-off. */
	hci_del_off_timer(hdev);

	/* Legacy (non-mgmt) userspace expects pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller type. */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
879
880/* ---- Interface to HCI drivers ---- */
881
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200882static int hci_rfkill_set_block(void *data, bool blocked)
883{
884 struct hci_dev *hdev = data;
885
886 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
887
888 if (!blocked)
889 return 0;
890
891 hci_dev_do_close(hdev);
892
893 return 0;
894}
895
/* Only the set_block hook is needed; rfkill polling callbacks are unused. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
899
/* Alloc HCI device.
 * Zero-allocates a struct hci_dev, prepares its sysfs representation and
 * the driver_init skb queue. Returns NULL on allocation failure; the
 * caller releases the device with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
915
/* Free HCI device.
 * Drops any queued driver-init skbs and releases the embedded struct
 * device reference; the actual kfree happens in the device release
 * callback once the last reference is gone. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
925
/* Workqueue handler: power on an adapter (queued from hci_register_dev).
 * On success, arms the auto power-off timer if HCI_AUTO_OFF is set and
 * announces the new index to the management interface once setup is done. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup exactly once */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
942
/* Workqueue handler: power off an adapter (queued from hci_auto_off). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
951
/* Auto power-off timer callback. Runs in timer (atomic) context, so the
 * actual close — which may sleep — is deferred to the workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
962
/* Cancel a pending auto power-off: clear the flag and delete the timer.
 * Called when userspace shows interest in the device or on unregister. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
970
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200971int hci_uuids_clear(struct hci_dev *hdev)
972{
973 struct list_head *p, *n;
974
975 list_for_each_safe(p, n, &hdev->uuids) {
976 struct bt_uuid *uuid;
977
978 uuid = list_entry(p, struct bt_uuid, list);
979
980 list_del(p);
981 kfree(uuid);
982 }
983
984 return 0;
985}
986
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200987int hci_link_keys_clear(struct hci_dev *hdev)
988{
989 struct list_head *p, *n;
990
991 list_for_each_safe(p, n, &hdev->link_keys) {
992 struct link_key *key;
993
994 key = list_entry(p, struct link_key, list);
995
996 list_del(p);
997 kfree(key);
998 }
999
1000 return 0;
1001}
1002
/* Look up the stored link key for @bdaddr; returns NULL if none exists. */
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
1013
/* Decide whether a link key should be stored persistently.
 * @conn may be NULL (security mode 3 / no connection context).
 * @old_key_type is 0xff when there was no previous key.
 * Returns 1 to keep the key, 0 to discard it after use. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1049
/* Find the SMP Long Term Key matching @ediv/@rand in the link-key list.
 * LTKs are stored as link_key entries of type HCI_LK_SMP_LTK with a
 * struct key_master_id payload in ->data. Returns NULL if not found. */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Payload must be exactly a key_master_id */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1072
/* Find a stored key for @bdaddr with the given key @type (e.g. SMP LTK).
 * Returns NULL if no matching entry exists. */
struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
1085
/* Store (or update) the link key for @bdaddr and notify the management
 * interface when @new_key is set. @conn may be NULL. Non-persistent keys
 * are reported to mgmt but then dropped from the list. Returns 0 or
 * -ENOMEM. Uses GFP_ATOMIC — may be called from non-sleeping context. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on disk */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are only reported, not kept around */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1140
/* Store (or update) an SMP Long Term Key for @bdaddr, reusing an existing
 * HCI_LK_SMP_LTK entry if present. @key_size is stashed in ->pin_len and
 * the ediv/rand pair in the trailing key_master_id payload.
 * Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate entry plus trailing key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): hci_add_link_key() passes a persistent flag as
	 * mgmt_new_key()'s third argument, but here old_key_type (0xff for
	 * a fresh key) is passed instead — verify this is intentional. */
	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1178
/* Delete the stored link key for @bdaddr.
 * Returns 0 on success or -ENOENT if no key was stored. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1194
/* HCI command timer function.
 * Fires when the controller failed to answer a command in time; fake a
 * command-credit so the command task can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1204
/* Look up cached remote out-of-band pairing data for @bdaddr.
 * Returns NULL if nothing is cached. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1216
/* Drop the cached remote OOB data for @bdaddr.
 * Returns 0 on success or -ENOENT if nothing was cached. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1232
1233int hci_remote_oob_data_clear(struct hci_dev *hdev)
1234{
1235 struct oob_data *data, *n;
1236
1237 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1238 list_del(&data->list);
1239 kfree(data);
1240 }
1241
1242 return 0;
1243}
1244
/* Cache remote out-of-band pairing data (hash + randomizer) for @bdaddr,
 * overwriting any existing entry for the same address.
 * Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* All fields are overwritten, so plain kmalloc above is fine */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1268
/* Find the blacklist entry for @bdaddr; returns NULL if not blacklisted. */
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}
1280
1281int hci_blacklist_clear(struct hci_dev *hdev)
1282{
1283 struct list_head *p, *n;
1284
1285 list_for_each_safe(p, n, &hdev->blacklist) {
1286 struct bdaddr_list *b;
1287
1288 b = list_entry(p, struct bdaddr_list, list);
1289
1290 list_del(p);
1291 kfree(b);
1292 }
1293
1294 return 0;
1295}
1296
/* Add @bdaddr to the device blacklist and notify the management interface.
 * Returns -EBADF for the wildcard address, -EEXIST if already blacklisted,
 * -ENOMEM on allocation failure, else the mgmt notification result. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is reserved as the "clear all" wildcard in del */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev->id, bdaddr);
}
1317
1318int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319{
1320 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001321
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001322 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
Antti Julku5e762442011-08-25 16:48:02 +03001323 return hci_blacklist_clear(hdev);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001324 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001325
1326 entry = hci_blacklist_lookup(hdev, bdaddr);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001327 if (!entry) {
Antti Julku5e762442011-08-25 16:48:02 +03001328 return -ENOENT;
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001329 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001330
1331 list_del(&entry->list);
1332 kfree(entry);
1333
Antti Julku5e762442011-08-25 16:48:02 +03001334 return mgmt_device_unblocked(hdev->id, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001335}
1336
/* adv_timer callback: expire the LE advertising cache under the device
 * lock so concurrent readers see a consistent list. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1347
Andre Guedes76c86862011-05-26 16:23:50 -03001348int hci_adv_entries_clear(struct hci_dev *hdev)
1349{
1350 struct adv_entry *entry, *tmp;
1351
1352 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1353 list_del(&entry->list);
1354 kfree(entry);
1355 }
1356
1357 BT_DBG("%s adv cache cleared", hdev->name);
1358
1359 return 0;
1360}
1361
1362struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363{
1364 struct adv_entry *entry;
1365
1366 list_for_each_entry(entry, &hdev->adv_entries, list)
1367 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1368 return entry;
1369
1370 return NULL;
1371}
1372
1373static inline int is_connectable_adv(u8 evt_type)
1374{
1375 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1376 return 1;
1377
1378 return 0;
1379}
1380
/* Cache a connectable LE advertising report's address.
 * Returns -EINVAL for non-connectable event types, 0 if the address is
 * already cached or was added, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1408
/* Register HCI device.
 * Assigns the first free hciN id, initializes all per-device state
 * (locks, tasklets, queues, timers, lists), creates the workqueue and
 * sysfs entries, optionally registers rfkill, and schedules the initial
 * power-on. Returns the new id on success or a negative errno; on
 * failure the device is removed from the global list again. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup mode and power on asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1526
/* Unregister HCI device.
 * Reverse of hci_register_dev(): unlink from the global list, close the
 * device, release reassembly buffers, notify mgmt (unless still in
 * init/setup), tear down rfkill/sysfs/timers/workqueue, flush all cached
 * per-device lists under the device lock, and drop the final reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a safe no-op for unused slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1572
/* Suspend HCI device: just broadcast the event to registered listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1580
/* Resume HCI device: just broadcast the event to registered listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1588
/* Receive frame from HCI drivers.
 * Takes ownership of @skb (freed on error). Frames are only accepted
 * while the device is up or being initialized; accepted frames are
 * timestamped and queued for the rx tasklet. Returns 0 or -ENXIO. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1612
/* Incrementally reassemble an HCI packet of @type from a driver byte
 * stream. State lives in hdev->reassembly[index]; a fresh skb is
 * allocated when no partial packet exists. Consumes up to @count bytes
 * from @data, hands completed frames to hci_recv_frame(), and returns
 * the number of unconsumed bytes, or a negative errno (-EILSEQ for a
 * bad type/index, -ENOMEM on allocation or oversize failure). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start a new packet: pick the max frame size and the
		 * header length we must read before knowing the payload
		 * length. The type range was validated above. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and sanity-check it against the skb tailroom */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1721
/* Feed a driver fragment of a single known packet @type into the
 * per-type reassembly slot (slot index is type - 1). Loops until all
 * input is consumed or an error occurs. Returns the leftover byte count
 * (0 on full consumption) or a negative errno from hci_reassembly(). */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past whatever hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1741
Suraj Sumangala99811512010-07-14 13:02:19 +05301742#define STREAM_REASSEMBLY 0
1743
/* Feed a raw byte stream (H:4 style, packet-type byte first) into the
 * shared STREAM_REASSEMBLY slot. The type byte is taken from the stream
 * when no packet is in progress, otherwise from the partial skb.
 * Returns leftover byte count or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the packet-type indicator byte */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1776
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777/* ---- Interface to upper protocols ---- */
1778
1779/* Register/Unregister protocols.
1780 * hci_task_lock is used to ensure that no tasks are running. */
1781int hci_register_proto(struct hci_proto *hp)
1782{
1783 int err = 0;
1784
1785 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1786
1787 if (hp->id >= HCI_MAX_PROTO)
1788 return -EINVAL;
1789
1790 write_lock_bh(&hci_task_lock);
1791
1792 if (!hci_proto[hp->id])
1793 hci_proto[hp->id] = hp;
1794 else
1795 err = -EEXIST;
1796
1797 write_unlock_bh(&hci_task_lock);
1798
1799 return err;
1800}
1801EXPORT_SYMBOL(hci_register_proto);
1802
1803int hci_unregister_proto(struct hci_proto *hp)
1804{
1805 int err = 0;
1806
1807 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1808
1809 if (hp->id >= HCI_MAX_PROTO)
1810 return -EINVAL;
1811
1812 write_lock_bh(&hci_task_lock);
1813
1814 if (hci_proto[hp->id])
1815 hci_proto[hp->id] = NULL;
1816 else
1817 err = -ENOENT;
1818
1819 write_unlock_bh(&hci_task_lock);
1820
1821 return err;
1822}
1823EXPORT_SYMBOL(hci_unregister_proto);
1824
/* Add @cb to the global HCI callback list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1836
/* Remove @cb from the global HCI callback list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1848
/* Hand one outgoing frame to the driver's send callback.
 * Mirrors the frame to promiscuous monitor sockets first. Ownership of
 * @skb transfers to the driver (or it is freed on the no-device error). */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1872
/* Send HCI command.
 * Builds a command packet (header + @plen bytes of @param), queues it on
 * cmd_q and kicks the command tasklet. During init the opcode is recorded
 * in init_last_cmd. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
1909/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001910void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911{
1912 struct hci_command_hdr *hdr;
1913
1914 if (!hdev->sent_cmd)
1915 return NULL;
1916
1917 hdr = (void *) hdev->sent_cmd->data;
1918
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001919 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 return NULL;
1921
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001922 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
1924 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1925}
1926
1927/* Send ACL data */
1928static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1929{
1930 struct hci_acl_hdr *hdr;
1931 int len = skb->len;
1932
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001933 skb_push(skb, HCI_ACL_HDR_SIZE);
1934 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001935 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001936 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1937 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938}
1939
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001940static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1941 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942{
1943 struct hci_dev *hdev = conn->hdev;
1944 struct sk_buff *list;
1945
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001946 list = skb_shinfo(skb)->frag_list;
1947 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 /* Non fragmented */
1949 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1950
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001951 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 } else {
1953 /* Fragmented */
1954 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1955
1956 skb_shinfo(skb)->frag_list = NULL;
1957
1958 /* Queue all fragments atomically */
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001959 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001961 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001962
1963 flags &= ~ACL_START;
1964 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 do {
1966 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001969 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001970 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
1972 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1973
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001974 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 } while (list);
1976
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001977 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001979}
1980
/* Send ACL data on an HCI channel: stamp the head skb with the owning
 * device, packet type and ACL header (using the connection handle and
 * @flags), queue it on the channel's data queue, and kick the TX
 * tasklet to schedule transmission. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	/* Fragments (if any) are given their own headers and enqueued here. */
	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1997
1998/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03001999void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000{
2001 struct hci_dev *hdev = conn->hdev;
2002 struct hci_sco_hdr hdr;
2003
2004 BT_DBG("%s len %d", hdev->name, skb->len);
2005
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002006 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 hdr.dlen = skb->len;
2008
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002009 skb_push(skb, HCI_SCO_HDR_SIZE);
2010 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002011 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002012
2013 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002014 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002017 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018}
2019EXPORT_SYMBOL(hci_send_sco);
2020
2021/* ---- HCI TX task (outgoing data) ---- */
2022
2023/* HCI Connection scheduler */
2024static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2025{
2026 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002027 struct hci_conn *conn = NULL, *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002030 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 * added and removed with TX task disabled. */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002032 list_for_each_entry(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002033 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002035
2036 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2037 continue;
2038
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 num++;
2040
2041 if (c->sent < min) {
2042 min = c->sent;
2043 conn = c;
2044 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002045
2046 if (hci_conn_num(hdev, type) == num)
2047 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
2049
2050 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002051 int cnt, q;
2052
2053 switch (conn->type) {
2054 case ACL_LINK:
2055 cnt = hdev->acl_cnt;
2056 break;
2057 case SCO_LINK:
2058 case ESCO_LINK:
2059 cnt = hdev->sco_cnt;
2060 break;
2061 case LE_LINK:
2062 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2063 break;
2064 default:
2065 cnt = 0;
2066 BT_ERR("Unknown link type");
2067 }
2068
2069 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070 *quote = q ? q : 1;
2071 } else
2072 *quote = 0;
2073
2074 BT_DBG("conn %p quote %d", conn, *quote);
2075 return conn;
2076}
2077
Ville Tervobae1f5d2011-02-10 22:38:53 -03002078static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079{
2080 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002081 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Ville Tervobae1f5d2011-02-10 22:38:53 -03002083 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
2085 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002086 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d2011-02-10 22:38:53 -03002087 if (c->type == type && c->sent) {
2088 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 hdev->name, batostr(&c->dst));
2090 hci_acl_disconn(c, 0x13);
2091 }
2092 }
2093}
2094
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002095static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2096 int *quote)
2097{
2098 struct hci_conn_hash *h = &hdev->conn_hash;
2099 struct hci_chan *chan = NULL;
2100 int num = 0, min = ~0, cur_prio = 0;
2101 struct hci_conn *conn;
2102 int cnt, q, conn_num = 0;
2103
2104 BT_DBG("%s", hdev->name);
2105
2106 list_for_each_entry(conn, &h->list, list) {
2107 struct hci_chan_hash *ch;
2108 struct hci_chan *tmp;
2109
2110 if (conn->type != type)
2111 continue;
2112
2113 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2114 continue;
2115
2116 conn_num++;
2117
2118 ch = &conn->chan_hash;
2119
2120 list_for_each_entry(tmp, &ch->list, list) {
2121 struct sk_buff *skb;
2122
2123 if (skb_queue_empty(&tmp->data_q))
2124 continue;
2125
2126 skb = skb_peek(&tmp->data_q);
2127 if (skb->priority < cur_prio)
2128 continue;
2129
2130 if (skb->priority > cur_prio) {
2131 num = 0;
2132 min = ~0;
2133 cur_prio = skb->priority;
2134 }
2135
2136 num++;
2137
2138 if (conn->sent < min) {
2139 min = conn->sent;
2140 chan = tmp;
2141 }
2142 }
2143
2144 if (hci_conn_num(hdev, type) == conn_num)
2145 break;
2146 }
2147
2148 if (!chan)
2149 return NULL;
2150
2151 switch (chan->conn->type) {
2152 case ACL_LINK:
2153 cnt = hdev->acl_cnt;
2154 break;
2155 case SCO_LINK:
2156 case ESCO_LINK:
2157 cnt = hdev->sco_cnt;
2158 break;
2159 case LE_LINK:
2160 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2161 break;
2162 default:
2163 cnt = 0;
2164 BT_ERR("Unknown link type");
2165 }
2166
2167 q = cnt / num;
2168 *quote = q ? q : 1;
2169 BT_DBG("chan %p quote %d", chan, *quote);
2170 return chan;
2171}
2172
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173static inline void hci_sched_acl(struct hci_dev *hdev)
2174{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002175 struct hci_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 struct sk_buff *skb;
2177 int quote;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002178 unsigned int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
2180 BT_DBG("%s", hdev->name);
2181
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002182 if (!hci_conn_num(hdev, ACL_LINK))
2183 return;
2184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 if (!test_bit(HCI_RAW, &hdev->flags)) {
2186 /* ACL tx timeout must be longer than maximum
2187 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur82453022008-02-17 23:25:57 -08002188 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002189 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 }
2191
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002192 cnt = hdev->acl_cnt;
Marcel Holtmann04837f62006-07-03 10:02:33 +02002193
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002194 while (hdev->acl_cnt &&
2195 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002196 u32 priority = (skb_peek(&chan->data_q))->priority;
2197 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002198 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2199 skb->len, skb->priority);
2200
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002201 /* Stop if priority has changed */
2202 if (skb->priority < priority)
2203 break;
2204
2205 skb = skb_dequeue(&chan->data_q);
2206
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002207 hci_conn_enter_active_mode(chan->conn,
2208 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002209
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 hci_send_frame(skb);
2211 hdev->acl_last_tx = jiffies;
2212
2213 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002214 chan->sent++;
2215 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 }
2217 }
2218}
2219
2220/* Schedule SCO */
/* SCO TX scheduler: while SCO buffer credits remain, round through
 * connections via hci_low_sent() and send each one's quota of queued
 * packets. conn->sent wraps back to 0 at ~0 (saturation guard). */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2243
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002244static inline void hci_sched_esco(struct hci_dev *hdev)
2245{
2246 struct hci_conn *conn;
2247 struct sk_buff *skb;
2248 int quote;
2249
2250 BT_DBG("%s", hdev->name);
2251
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002252 if (!hci_conn_num(hdev, ESCO_LINK))
2253 return;
2254
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002255 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2256 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2257 BT_DBG("skb %p len %d", skb, skb->len);
2258 hci_send_frame(skb);
2259
2260 conn->sent++;
2261 if (conn->sent == ~0)
2262 conn->sent = 0;
2263 }
2264 }
2265}
2266
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002267static inline void hci_sched_le(struct hci_dev *hdev)
2268{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002269 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002270 struct sk_buff *skb;
2271 int quote, cnt;
2272
2273 BT_DBG("%s", hdev->name);
2274
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002275 if (!hci_conn_num(hdev, LE_LINK))
2276 return;
2277
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002278 if (!test_bit(HCI_RAW, &hdev->flags)) {
2279 /* LE tx timeout must be longer than maximum
2280 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03002281 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002282 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002283 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002284 }
2285
2286 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002287 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002288 u32 priority = (skb_peek(&chan->data_q))->priority;
2289 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002290 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2291 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002292
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002293 /* Stop if priority has changed */
2294 if (skb->priority < priority)
2295 break;
2296
2297 skb = skb_dequeue(&chan->data_q);
2298
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002299 hci_send_frame(skb);
2300 hdev->le_last_tx = jiffies;
2301
2302 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002303 chan->sent++;
2304 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002305 }
2306 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002307
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002308 if (hdev->le_pkts)
2309 hdev->le_cnt = cnt;
2310 else
2311 hdev->acl_cnt = cnt;
2312}
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314static void hci_tx_task(unsigned long arg)
2315{
2316 struct hci_dev *hdev = (struct hci_dev *) arg;
2317 struct sk_buff *skb;
2318
2319 read_lock(&hci_task_lock);
2320
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002321 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2322 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323
2324 /* Schedule queues and send stuff to HCI driver */
2325
2326 hci_sched_acl(hdev);
2327
2328 hci_sched_sco(hdev);
2329
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002330 hci_sched_esco(hdev);
2331
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002332 hci_sched_le(hdev);
2333
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 /* Send next queued raw (unknown type) packet */
2335 while ((skb = skb_dequeue(&hdev->raw_q)))
2336 hci_send_frame(skb);
2337
2338 read_unlock(&hci_task_lock);
2339}
2340
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002341/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002342
2343/* ACL data packet */
/* Process one inbound ACL data packet: strip the ACL header, split the
 * handle word into connection handle and flags, look up the connection
 * and hand the payload to the L2CAP protocol handler. The skb is
 * consumed: ownership passes to recv_acldata() on success, otherwise
 * it is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field carries both the connection handle and
	 * the packet boundary/broadcast flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2382
2383/* SCO data packet */
/* Process one inbound SCO data packet: strip the SCO header, look up
 * the connection by handle and hand the payload to the SCO protocol
 * handler. The skb is consumed: ownership passes to recv_scodata() on
 * success, otherwise it is freed here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2418
Marcel Holtmann65164552005-10-28 19:20:48 +02002419static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420{
2421 struct hci_dev *hdev = (struct hci_dev *) arg;
2422 struct sk_buff *skb;
2423
2424 BT_DBG("%s", hdev->name);
2425
2426 read_lock(&hci_task_lock);
2427
2428 while ((skb = skb_dequeue(&hdev->rx_q))) {
2429 if (atomic_read(&hdev->promisc)) {
2430 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002431 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 }
2433
2434 if (test_bit(HCI_RAW, &hdev->flags)) {
2435 kfree_skb(skb);
2436 continue;
2437 }
2438
2439 if (test_bit(HCI_INIT, &hdev->flags)) {
2440 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002441 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442 case HCI_ACLDATA_PKT:
2443 case HCI_SCODATA_PKT:
2444 kfree_skb(skb);
2445 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
2448
2449 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002450 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 case HCI_EVENT_PKT:
2452 hci_event_packet(hdev, skb);
2453 break;
2454
2455 case HCI_ACLDATA_PKT:
2456 BT_DBG("%s ACL data packet", hdev->name);
2457 hci_acldata_packet(hdev, skb);
2458 break;
2459
2460 case HCI_SCODATA_PKT:
2461 BT_DBG("%s SCO data packet", hdev->name);
2462 hci_scodata_packet(hdev, skb);
2463 break;
2464
2465 default:
2466 kfree_skb(skb);
2467 break;
2468 }
2469 }
2470
2471 read_unlock(&hci_task_lock);
2472}
2473
/* Command tasklet: transmit the next queued HCI command when the
 * controller has a free command credit (cmd_cnt). A clone of the
 * command is cached in hdev->sent_cmd so hci_sent_cmd_data() can match
 * the eventual command complete/status event. On clone failure the
 * command is requeued at the head and the tasklet rescheduled.
 * @arg is the hci_dev pointer cast to unsigned long. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously cached command, if any. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No response is expected while resetting, so the
			 * command timeout watchdog is stopped instead. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}