blob: e5cf01396773f29862c90d486061e3338d8cb2c5 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an auto-powered-on controller is powered back off */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet handlers, defined later in this file */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
Alan Sterne041c682006-03-27 01:16:30 -080082 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
Alan Sterne041c682006-03-27 01:16:30 -080087 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070088}
89
Marcel Holtmann65164552005-10-28 19:20:48 +020090static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070091{
Alan Sterne041c682006-03-27 01:16:30 -080092 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070093}
94
95/* ---- HCI requests ---- */
96
Johan Hedberg23bb5762010-12-21 23:01:27 +020097void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070098{
Johan Hedberg23bb5762010-12-21 23:01:27 +020099 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
Johan Hedberga5040ef2011-01-10 13:28:59 +0200101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
103 */
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100127 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700149 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160
Johan Hedberga5040ef2011-01-10 13:28:59 +0200161 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100169 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170{
171 int ret;
172
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300189 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191}
192
193static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200195 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
200 BT_DBG("%s %ld", hdev->name, opt);
201
202 /* Driver initialization */
203
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700207 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100208
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100210 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 }
212 skb_queue_purge(&hdev->driver_init);
213
214 /* Mandatory initialization */
215
216 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300220 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221
222 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200225 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200227
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231#if 0
232 /* Host buffer size */
233 {
234 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 }
241#endif
242
243 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251
252 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
255 /* Optional initialization */
256
257 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200258 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260
Linus Torvalds1da177e2005-04-16 15:20:36 -0700261 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700262 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200264
265 bacpy(&cp.bdaddr, BDADDR_ANY);
266 cp.delete_all = 1;
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268}
269
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300270static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271{
272 BT_DBG("%s", hdev->name);
273
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276}
277
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 scan = opt;
281
282 BT_DBG("%s %x", hdev->name, scan);
283
284 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286}
287
288static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 auth = opt;
291
292 BT_DBG("%s %x", hdev->name, auth);
293
294 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296}
297
298static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 encrypt = opt;
301
302 BT_DBG("%s %x", hdev->name, encrypt);
303
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200304 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306}
307
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200308static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __le16 policy = cpu_to_le16(opt);
311
Marcel Holtmanna418b892008-11-30 12:17:28 +0100312 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200313
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316}
317
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900318/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319 * Device is held on return. */
320struct hci_dev *hci_dev_get(int index)
321{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200322 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200330 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331 if (d->id == index) {
332 hdev = hci_dev_hold(d);
333 break;
334 }
335 }
336 read_unlock(&hci_dev_list_lock);
337 return hdev;
338}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
340/* ---- Inquiry support ---- */
341static void inquiry_cache_flush(struct hci_dev *hdev)
342{
343 struct inquiry_cache *cache = &hdev->inq_cache;
344 struct inquiry_entry *next = cache->list, *e;
345
346 BT_DBG("cache %p", cache);
347
348 cache->list = NULL;
349 while ((e = next)) {
350 next = e->next;
351 kfree(e);
352 }
353}
354
355struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356{
357 struct inquiry_cache *cache = &hdev->inq_cache;
358 struct inquiry_entry *e;
359
360 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362 for (e = cache->list; e; e = e->next)
363 if (!bacmp(&e->data.bdaddr, bdaddr))
364 break;
365 return e;
366}
367
368void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369{
370 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200371 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372
373 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200375 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200378 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200381
382 ie->next = cache->list;
383 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384 }
385
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200386 memcpy(&ie->data, data, sizeof(*data));
387 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 cache->timestamp = jiffies;
389}
390
391static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392{
393 struct inquiry_cache *cache = &hdev->inq_cache;
394 struct inquiry_info *info = (struct inquiry_info *) buf;
395 struct inquiry_entry *e;
396 int copied = 0;
397
398 for (e = cache->list; e && copied < num; e = e->next, copied++) {
399 struct inquiry_data *data = &e->data;
400 bacpy(&info->bdaddr, &data->bdaddr);
401 info->pscan_rep_mode = data->pscan_rep_mode;
402 info->pscan_period_mode = data->pscan_period_mode;
403 info->pscan_mode = data->pscan_mode;
404 memcpy(info->dev_class, data->dev_class, 3);
405 info->clock_offset = data->clock_offset;
406 info++;
407 }
408
409 BT_DBG("cache %p, copied %d", cache, copied);
410 return copied;
411}
412
413static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414{
415 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416 struct hci_cp_inquiry cp;
417
418 BT_DBG("%s", hdev->name);
419
420 if (test_bit(HCI_INQUIRY, &hdev->flags))
421 return;
422
423 /* Start Inquiry */
424 memcpy(&cp.lap, &ir->lap, 3);
425 cp.length = ir->length;
426 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200427 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428}
429
430int hci_inquiry(void __user *arg)
431{
432 __u8 __user *ptr = arg;
433 struct hci_inquiry_req ir;
434 struct hci_dev *hdev;
435 int err = 0, do_inquiry = 0, max_rsp;
436 long timeo;
437 __u8 *buf;
438
439 if (copy_from_user(&ir, ptr, sizeof(ir)))
440 return -EFAULT;
441
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200442 hdev = hci_dev_get(ir.dev_id);
443 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 return -ENODEV;
445
446 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900447 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200448 inquiry_cache_empty(hdev) ||
449 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450 inquiry_cache_flush(hdev);
451 do_inquiry = 1;
452 }
453 hci_dev_unlock_bh(hdev);
454
Marcel Holtmann04837f62006-07-03 10:02:33 +0200455 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200456
457 if (do_inquiry) {
458 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459 if (err < 0)
460 goto done;
461 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462
463 /* for unlimited number of responses we will use buffer with 255 entries */
464 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467 * copy it to the user space.
468 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100469 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200470 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471 err = -ENOMEM;
472 goto done;
473 }
474
475 hci_dev_lock_bh(hdev);
476 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477 hci_dev_unlock_bh(hdev);
478
479 BT_DBG("num_rsp %d", ir.num_rsp);
480
481 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482 ptr += sizeof(ir);
483 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484 ir.num_rsp))
485 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900486 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 err = -EFAULT;
488
489 kfree(buf);
490
491done:
492 hci_dev_put(hdev);
493 return err;
494}
495
496/* ---- HCI ioctl helpers ---- */
497
498int hci_dev_open(__u16 dev)
499{
500 struct hci_dev *hdev;
501 int ret = 0;
502
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200503 hdev = hci_dev_get(dev);
504 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 return -ENODEV;
506
507 BT_DBG("%s %p", hdev->name, hdev);
508
509 hci_req_lock(hdev);
510
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200511 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512 ret = -ERFKILL;
513 goto done;
514 }
515
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 if (test_bit(HCI_UP, &hdev->flags)) {
517 ret = -EALREADY;
518 goto done;
519 }
520
521 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522 set_bit(HCI_RAW, &hdev->flags);
523
Marcel Holtmann943da252010-02-13 02:28:41 +0100524 /* Treat all non BR/EDR controllers as raw devices for now */
525 if (hdev->dev_type != HCI_BREDR)
526 set_bit(HCI_RAW, &hdev->flags);
527
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528 if (hdev->open(hdev)) {
529 ret = -EIO;
530 goto done;
531 }
532
533 if (!test_bit(HCI_RAW, &hdev->flags)) {
534 atomic_set(&hdev->cmd_cnt, 1);
535 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200536 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537
Marcel Holtmann04837f62006-07-03 10:02:33 +0200538 ret = __hci_request(hdev, hci_init_req, 0,
539 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540
Andre Guedeseead27d2011-06-30 19:20:55 -0300541 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300542 ret = __hci_request(hdev, hci_le_init_req, 0,
543 msecs_to_jiffies(HCI_INIT_TIMEOUT));
544
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 clear_bit(HCI_INIT, &hdev->flags);
546 }
547
548 if (!ret) {
549 hci_dev_hold(hdev);
550 set_bit(HCI_UP, &hdev->flags);
551 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200552 if (!test_bit(HCI_SETUP, &hdev->flags))
Johan Hedberg744cf192011-11-08 20:40:14 +0200553 mgmt_powered(hdev, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900554 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 /* Init failed, cleanup */
556 tasklet_kill(&hdev->rx_task);
557 tasklet_kill(&hdev->tx_task);
558 tasklet_kill(&hdev->cmd_task);
559
560 skb_queue_purge(&hdev->cmd_q);
561 skb_queue_purge(&hdev->rx_q);
562
563 if (hdev->flush)
564 hdev->flush(hdev);
565
566 if (hdev->sent_cmd) {
567 kfree_skb(hdev->sent_cmd);
568 hdev->sent_cmd = NULL;
569 }
570
571 hdev->close(hdev);
572 hdev->flags = 0;
573 }
574
575done:
576 hci_req_unlock(hdev);
577 hci_dev_put(hdev);
578 return ret;
579}
580
581static int hci_dev_do_close(struct hci_dev *hdev)
582{
583 BT_DBG("%s %p", hdev->name, hdev);
584
585 hci_req_cancel(hdev, ENODEV);
586 hci_req_lock(hdev);
587
588 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300589 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 hci_req_unlock(hdev);
591 return 0;
592 }
593
594 /* Kill RX and TX tasks */
595 tasklet_kill(&hdev->rx_task);
596 tasklet_kill(&hdev->tx_task);
597
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200598 if (hdev->discov_timeout > 0) {
599 cancel_delayed_work_sync(&hdev->discov_off);
600 hdev->discov_timeout = 0;
601 }
602
Johan Hedberg32435532011-11-07 22:16:04 +0200603 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
604 cancel_delayed_work_sync(&hdev->power_off);
605
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 hci_dev_lock_bh(hdev);
607 inquiry_cache_flush(hdev);
608 hci_conn_hash_flush(hdev);
609 hci_dev_unlock_bh(hdev);
610
611 hci_notify(hdev, HCI_DEV_DOWN);
612
613 if (hdev->flush)
614 hdev->flush(hdev);
615
616 /* Reset device */
617 skb_queue_purge(&hdev->cmd_q);
618 atomic_set(&hdev->cmd_cnt, 1);
619 if (!test_bit(HCI_RAW, &hdev->flags)) {
620 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200621 __hci_request(hdev, hci_reset_req, 0,
Szymon Janc43611a72011-10-17 23:05:49 +0200622 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 clear_bit(HCI_INIT, &hdev->flags);
624 }
625
626 /* Kill cmd task */
627 tasklet_kill(&hdev->cmd_task);
628
629 /* Drop queues */
630 skb_queue_purge(&hdev->rx_q);
631 skb_queue_purge(&hdev->cmd_q);
632 skb_queue_purge(&hdev->raw_q);
633
634 /* Drop last sent command */
635 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300636 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637 kfree_skb(hdev->sent_cmd);
638 hdev->sent_cmd = NULL;
639 }
640
641 /* After this point our queues are empty
642 * and no tasks are scheduled. */
643 hdev->close(hdev);
644
Johan Hedberg744cf192011-11-08 20:40:14 +0200645 mgmt_powered(hdev, 0);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200646
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 /* Clear flags */
648 hdev->flags = 0;
649
650 hci_req_unlock(hdev);
651
652 hci_dev_put(hdev);
653 return 0;
654}
655
656int hci_dev_close(__u16 dev)
657{
658 struct hci_dev *hdev;
659 int err;
660
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200661 hdev = hci_dev_get(dev);
662 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663 return -ENODEV;
664 err = hci_dev_do_close(hdev);
665 hci_dev_put(hdev);
666 return err;
667}
668
669int hci_dev_reset(__u16 dev)
670{
671 struct hci_dev *hdev;
672 int ret = 0;
673
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200674 hdev = hci_dev_get(dev);
675 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700676 return -ENODEV;
677
678 hci_req_lock(hdev);
679 tasklet_disable(&hdev->tx_task);
680
681 if (!test_bit(HCI_UP, &hdev->flags))
682 goto done;
683
684 /* Drop queues */
685 skb_queue_purge(&hdev->rx_q);
686 skb_queue_purge(&hdev->cmd_q);
687
688 hci_dev_lock_bh(hdev);
689 inquiry_cache_flush(hdev);
690 hci_conn_hash_flush(hdev);
691 hci_dev_unlock_bh(hdev);
692
693 if (hdev->flush)
694 hdev->flush(hdev);
695
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900696 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300697 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
699 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200700 ret = __hci_request(hdev, hci_reset_req, 0,
701 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700702
703done:
704 tasklet_enable(&hdev->tx_task);
705 hci_req_unlock(hdev);
706 hci_dev_put(hdev);
707 return ret;
708}
709
710int hci_dev_reset_stat(__u16 dev)
711{
712 struct hci_dev *hdev;
713 int ret = 0;
714
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200715 hdev = hci_dev_get(dev);
716 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717 return -ENODEV;
718
719 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
720
721 hci_dev_put(hdev);
722
723 return ret;
724}
725
726int hci_dev_cmd(unsigned int cmd, void __user *arg)
727{
728 struct hci_dev *hdev;
729 struct hci_dev_req dr;
730 int err = 0;
731
732 if (copy_from_user(&dr, arg, sizeof(dr)))
733 return -EFAULT;
734
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200735 hdev = hci_dev_get(dr.dev_id);
736 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737 return -ENODEV;
738
739 switch (cmd) {
740 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200741 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
742 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 break;
744
745 case HCISETENCRYPT:
746 if (!lmp_encrypt_capable(hdev)) {
747 err = -EOPNOTSUPP;
748 break;
749 }
750
751 if (!test_bit(HCI_AUTH, &hdev->flags)) {
752 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200753 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
754 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 if (err)
756 break;
757 }
758
Marcel Holtmann04837f62006-07-03 10:02:33 +0200759 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 break;
762
763 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200764 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
765 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700766 break;
767
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200768 case HCISETLINKPOL:
769 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
770 msecs_to_jiffies(HCI_INIT_TIMEOUT));
771 break;
772
773 case HCISETLINKMODE:
774 hdev->link_mode = ((__u16) dr.dev_opt) &
775 (HCI_LM_MASTER | HCI_LM_ACCEPT);
776 break;
777
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 case HCISETPTYPE:
779 hdev->pkt_type = (__u16) dr.dev_opt;
780 break;
781
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200783 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
784 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 break;
786
787 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200788 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
789 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 break;
791
792 default:
793 err = -EINVAL;
794 break;
795 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200796
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 hci_dev_put(hdev);
798 return err;
799}
800
801int hci_get_dev_list(void __user *arg)
802{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200803 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804 struct hci_dev_list_req *dl;
805 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 int n = 0, size, err;
807 __u16 dev_num;
808
809 if (get_user(dev_num, (__u16 __user *) arg))
810 return -EFAULT;
811
812 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
813 return -EINVAL;
814
815 size = sizeof(*dl) + dev_num * sizeof(*dr);
816
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200817 dl = kzalloc(size, GFP_KERNEL);
818 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 return -ENOMEM;
820
821 dr = dl->dev_req;
822
823 read_lock_bh(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200824 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberg32435532011-11-07 22:16:04 +0200825 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
826 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +0200827
828 if (!test_bit(HCI_MGMT, &hdev->flags))
829 set_bit(HCI_PAIRABLE, &hdev->flags);
830
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 (dr + n)->dev_id = hdev->id;
832 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200833
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834 if (++n >= dev_num)
835 break;
836 }
837 read_unlock_bh(&hci_dev_list_lock);
838
839 dl->dev_num = n;
840 size = sizeof(*dl) + n * sizeof(*dr);
841
842 err = copy_to_user(arg, dl, size);
843 kfree(dl);
844
845 return err ? -EFAULT : 0;
846}
847
/* Handle the HCIGETDEVINFO ioctl: copy a snapshot of one controller's
 * identity and state (address, type, flags, MTUs, stats, features) out
 * to userspace. Returns 0 on success, -EFAULT on bad user memory,
 * -ENODEV if the requested device id does not exist.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Takes a reference on the device; dropped before returning */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace has shown interest: cancel any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without a management interface, default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* low nibble: transport bus, high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
889
890/* ---- Interface to HCI drivers ---- */
891
/* rfkill callback, invoked when the rfkill core (un)blocks the radio.
 * Blocking powers the controller down via hci_dev_do_close();
 * unblocking takes no action here. Always returns 0.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}
905
/* Operations registered with the rfkill subsystem for HCI devices */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
909
/* Alloc HCI device */
/* Allocate a zeroed struct hci_dev, set up its sysfs representation
 * and the driver_init skb queue. Returns NULL on allocation failure.
 * The matching release is hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
925
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-queued init frames still pending */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
935
/* Deferred power-on work: open the device, arm the delayed auto
 * power-off if HCI_AUTO_OFF is set, and announce the new controller
 * index to the management interface once initial setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If nobody claims the device, power it back off after a timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
952
/* Deferred power-off work: clear HCI_AUTO_OFF and close the device */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
964
/* Delayed work ending a time-limited discoverable period: write the
 * scan-enable setting back to page scan only and reset discov_timeout,
 * all under the device lock.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock_bh(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock_bh(hdev);
}
982
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200983int hci_uuids_clear(struct hci_dev *hdev)
984{
985 struct list_head *p, *n;
986
987 list_for_each_safe(p, n, &hdev->uuids) {
988 struct bt_uuid *uuid;
989
990 uuid = list_entry(p, struct bt_uuid, list);
991
992 list_del(p);
993 kfree(uuid);
994 }
995
996 return 0;
997}
998
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200999int hci_link_keys_clear(struct hci_dev *hdev)
1000{
1001 struct list_head *p, *n;
1002
1003 list_for_each_safe(p, n, &hdev->link_keys) {
1004 struct link_key *key;
1005
1006 key = list_entry(p, struct link_key, list);
1007
1008 list_del(p);
1009 kfree(key);
1010 }
1011
1012 return 0;
1013}
1014
1015struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1016{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001017 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001018
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001019 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001020 if (bacmp(bdaddr, &k->bdaddr) == 0)
1021 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001022
1023 return NULL;
1024}
1025
/* Decide whether a newly created link key should be stored beyond the
 * current connection. Returns 1 to keep the key persistently, 0 to
 * drop it afterwards. The decision is based on the key type and both
 * sides' reported authentication requirements.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1061
/* Find a stored LE Long Term Key matching the given EDIV and Rand
 * values. LTKs live in the common link_keys list with type
 * HCI_LK_SMP_LTK and a struct key_master_id in their trailing data.
 * Returns the matching entry or NULL.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* Skip entries whose payload isn't a key_master_id */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1084
1085struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1086 bdaddr_t *bdaddr, u8 type)
1087{
1088 struct link_key *k;
1089
1090 list_for_each_entry(k, &hdev->link_keys, list)
1091 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1092 return k;
1093
1094 return NULL;
1095}
1096EXPORT_SYMBOL(hci_find_link_key_type);
1097
/* Store (or update) a BR/EDR link key for bdaddr. When new_key is set,
 * the management interface is notified and keys judged non-persistent
 * by hci_persistent_key() are dropped again right after notification.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1152
/* Store (or update) an LE Long Term Key for bdaddr together with its
 * EDIV/Rand master identification, reusing the link_keys list with
 * type HCI_LK_SMP_LTK. Notifies the management interface when
 * new_key is set. Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate extra room for the trailing key_master_id */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* For LTKs the pin_len field carries the encryption key size */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1190
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001191int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1192{
1193 struct link_key *key;
1194
1195 key = hci_find_link_key(hdev, bdaddr);
1196 if (!key)
1197 return -ENOENT;
1198
1199 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1200
1201 list_del(&key->list);
1202 kfree(key);
1203
1204 return 0;
1205}
1206
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time: assume
 * the command was lost, restore the single command credit and kick the
 * cmd tasklet so queued commands can proceed again.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1216
Szymon Janc2763eda2011-03-22 13:12:22 +01001217struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1218 bdaddr_t *bdaddr)
1219{
1220 struct oob_data *data;
1221
1222 list_for_each_entry(data, &hdev->remote_oob_data, list)
1223 if (bacmp(bdaddr, &data->bdaddr) == 0)
1224 return data;
1225
1226 return NULL;
1227}
1228
1229int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1230{
1231 struct oob_data *data;
1232
1233 data = hci_find_remote_oob_data(hdev, bdaddr);
1234 if (!data)
1235 return -ENOENT;
1236
1237 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1238
1239 list_del(&data->list);
1240 kfree(data);
1241
1242 return 0;
1243}
1244
1245int hci_remote_oob_data_clear(struct hci_dev *hdev)
1246{
1247 struct oob_data *data, *n;
1248
1249 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1250 list_del(&data->list);
1251 kfree(data);
1252 }
1253
1254 return 0;
1255}
1256
/* Store (or refresh) a remote device's pairing OOB hash and
 * randomizer, creating a new entry when none exists for the address.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* Existing entries are simply overwritten with the new values */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1280
Antti Julkub2a66aa2011-06-15 12:01:14 +03001281struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1282 bdaddr_t *bdaddr)
1283{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001284 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001285
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001286 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001287 if (bacmp(bdaddr, &b->bdaddr) == 0)
1288 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001289
1290 return NULL;
1291}
1292
1293int hci_blacklist_clear(struct hci_dev *hdev)
1294{
1295 struct list_head *p, *n;
1296
1297 list_for_each_safe(p, n, &hdev->blacklist) {
1298 struct bdaddr_list *b;
1299
1300 b = list_entry(p, struct bdaddr_list, list);
1301
1302 list_del(p);
1303 kfree(b);
1304 }
1305
1306 return 0;
1307}
1308
/* Add bdaddr to the device's blacklist and notify the management
 * interface. Returns -EBADF for the wildcard address, -EEXIST for a
 * duplicate, -ENOMEM on allocation failure, otherwise the result of
 * mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blocked */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1329
1330int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1331{
1332 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001333
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001334 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
Antti Julku5e762442011-08-25 16:48:02 +03001335 return hci_blacklist_clear(hdev);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001336 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001337
1338 entry = hci_blacklist_lookup(hdev, bdaddr);
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001339 if (!entry) {
Antti Julku5e762442011-08-25 16:48:02 +03001340 return -ENOENT;
Gustavo F. Padovana7925bd2011-06-17 16:15:10 -03001341 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03001342
1343 list_del(&entry->list);
1344 kfree(entry);
1345
Johan Hedberg744cf192011-11-08 20:40:14 +02001346 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001347}
1348
/* Advertising-cache expiry timer: flush all cached advertising
 * entries under the device lock.
 */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1359
Andre Guedes76c86862011-05-26 16:23:50 -03001360int hci_adv_entries_clear(struct hci_dev *hdev)
1361{
1362 struct adv_entry *entry, *tmp;
1363
1364 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1365 list_del(&entry->list);
1366 kfree(entry);
1367 }
1368
1369 BT_DBG("%s adv cache cleared", hdev->name);
1370
1371 return 0;
1372}
1373
1374struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1375{
1376 struct adv_entry *entry;
1377
1378 list_for_each_entry(entry, &hdev->adv_entries, list)
1379 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1380 return entry;
1381
1382 return NULL;
1383}
1384
1385static inline int is_connectable_adv(u8 evt_type)
1386{
1387 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1388 return 1;
1389
1390 return 0;
1391}
1392
/* Cache the address from an LE advertising report, but only for
 * connectable event types and only if not already cached. Returns 0
 * on success (or already cached), -EINVAL for non-connectable events,
 * -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1420
/* Register HCI device */
/* Driver entry point: assign the lowest free index (BR/EDR may use 0,
 * other device types start at 1), initialize all per-device state
 * (tasklets, queues, timers, lists, work items), create the per-device
 * workqueue and sysfs entries, optionally register rfkill, and finally
 * schedule the deferred power-on. Returns the assigned id on success
 * or a negative error, unwinding the list insertion on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Minimal driver contract: open/close/destruct are mandatory */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert so the list stays sorted by id */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: run without rfkill */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Defer power-on to the workqueue; auto-off until claimed */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1546
/* Unregister HCI device */
/* Driver exit path: remove the device from the global list, close it,
 * free reassembly buffers, notify the management interface (unless the
 * device never finished setup), tear down rfkill/sysfs/timers/work,
 * clear all per-device lists and drop the final reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev);

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1595
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1603
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies listeners; no device state is changed here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1611
/* Receive frame from HCI drivers */
/* Accept a complete frame from the transport driver, timestamp it and
 * queue it for the rx tasklet. Frames arriving while the device is
 * neither up nor initializing are dropped with -ENXIO. Takes ownership
 * of skb in all cases.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1635
/* Core reassembly loop shared by hci_recv_fragment() and
 * hci_recv_stream_fragment(): append up to 'count' bytes of 'data' to
 * the partial packet kept in hdev->reassembly[index], allocating the
 * skb on first use. Once the length announced by the packet header is
 * complete, the skb is handed off to hci_recv_frame().
 * Returns the number of input bytes left over after a completed frame
 * (0 if everything was consumed), or a negative error: -EILSEQ for a
 * bad type/index, -ENOMEM on allocation failure or oversized payload.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst
		 * case of this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* first collect the header */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has arrived, learn the payload
		 * length from it and continue expecting that many bytes */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1744
/* Feed driver-provided data of a known packet type into the per-type
 * reassembly slot (index type - 1). Loops until all bytes are
 * consumed. Returns the final hci_reassembly() result: leftover byte
 * count or a negative error (-EILSEQ for an invalid type).
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Continue with whatever was left after a complete frame */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1764
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream: the first byte of each
 * packet carries the packet type indicator, and all packets share the
 * single STREAM_REASSEMBLY slot. Returns leftover byte count or a
 * negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Continue with whatever was left after a complete frame */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1799
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800/* ---- Interface to upper protocols ---- */
1801
1802/* Register/Unregister protocols.
1803 * hci_task_lock is used to ensure that no tasks are running. */
1804int hci_register_proto(struct hci_proto *hp)
1805{
1806 int err = 0;
1807
1808 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1809
1810 if (hp->id >= HCI_MAX_PROTO)
1811 return -EINVAL;
1812
1813 write_lock_bh(&hci_task_lock);
1814
1815 if (!hci_proto[hp->id])
1816 hci_proto[hp->id] = hp;
1817 else
1818 err = -EEXIST;
1819
1820 write_unlock_bh(&hci_task_lock);
1821
1822 return err;
1823}
1824EXPORT_SYMBOL(hci_register_proto);
1825
1826int hci_unregister_proto(struct hci_proto *hp)
1827{
1828 int err = 0;
1829
1830 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1831
1832 if (hp->id >= HCI_MAX_PROTO)
1833 return -EINVAL;
1834
1835 write_lock_bh(&hci_task_lock);
1836
1837 if (hci_proto[hp->id])
1838 hci_proto[hp->id] = NULL;
1839 else
1840 err = -ENOENT;
1841
1842 write_unlock_bh(&hci_task_lock);
1843
1844 return err;
1845}
1846EXPORT_SYMBOL(hci_unregister_proto);
1847
/* Add a callback handler to the global hci_cb_list under its lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1859
/* Remove @cb from the global hci_cb_list under the list lock.
 * Always returns 0; @cb must have been registered previously. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1871
/* Hand one frame to the transport driver (hdev->send).
 * The skb carries its hci_dev in skb->dev; if that is missing the skb
 * is freed and -ENODEV is returned.  In promiscuous mode a timestamped
 * copy is delivered to the monitoring sockets first.  Returns the
 * driver's send() result. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1895
/* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param), queue
 * it on cmd_q and kick the command tasklet.  Returns 0 on success or
 * -ENOMEM if the skb allocation fails.  @param is copied, so the
 * caller may reuse its buffer immediately. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued while initializing the device,
	 * so the init sequence can track its progress. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
1932/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001933void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934{
1935 struct hci_command_hdr *hdr;
1936
1937 if (!hdev->sent_cmd)
1938 return NULL;
1939
1940 hdr = (void *) hdev->sent_cmd->data;
1941
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001942 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943 return NULL;
1944
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001945 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946
1947 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1948}
1949
1950/* Send ACL data */
1951static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1952{
1953 struct hci_acl_hdr *hdr;
1954 int len = skb->len;
1955
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001956 skb_push(skb, HCI_ACL_HDR_SIZE);
1957 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001958 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001959 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1960 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961}
1962
/* Queue an ACL skb (possibly a fragment chain) on @queue.
 * The caller has already added the ACL header to the first skb; any
 * skbs hanging off frag_list are continuation fragments, which get
 * their own headers here with ACL_CONT set.  The whole chain is
 * spliced under the queue lock so fragments are never interleaved
 * with other traffic. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain so the head skb is a plain packet. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2003
/* Send ACL data on @chan: tag the skb, add the ACL header for the
 * channel's connection handle, queue it on the channel's data queue
 * and schedule the TX tasklet.  Ownership of @skb passes to the
 * queue. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2020
/* Send SCO data */
/* Prepend a SCO header (connection handle + payload length) to @skb,
 * queue it on the connection's data queue and schedule the TX tasklet.
 * Ownership of @skb passes to the queue. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2043
2044/* ---- HCI TX task (outgoing data) ---- */
2045
2046/* HCI Connection scheduler */
2047static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2048{
2049 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002050 struct hci_conn *conn = NULL, *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002053 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 * added and removed with TX task disabled. */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002055 list_for_each_entry(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002056 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002058
2059 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2060 continue;
2061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 num++;
2063
2064 if (c->sent < min) {
2065 min = c->sent;
2066 conn = c;
2067 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002068
2069 if (hci_conn_num(hdev, type) == num)
2070 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 }
2072
2073 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002074 int cnt, q;
2075
2076 switch (conn->type) {
2077 case ACL_LINK:
2078 cnt = hdev->acl_cnt;
2079 break;
2080 case SCO_LINK:
2081 case ESCO_LINK:
2082 cnt = hdev->sco_cnt;
2083 break;
2084 case LE_LINK:
2085 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2086 break;
2087 default:
2088 cnt = 0;
2089 BT_ERR("Unknown link type");
2090 }
2091
2092 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 *quote = q ? q : 1;
2094 } else
2095 *quote = 0;
2096
2097 BT_DBG("conn %p quote %d", conn, *quote);
2098 return conn;
2099}
2100
Ville Tervobae1f5d2011-02-10 22:38:53 -03002101static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102{
2103 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002104 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
Ville Tervobae1f5d2011-02-10 22:38:53 -03002106 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107
2108 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002109 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d2011-02-10 22:38:53 -03002110 if (c->type == type && c->sent) {
2111 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 hdev->name, batostr(&c->dst));
2113 hci_acl_disconn(c, 0x13);
2114 }
2115 }
2116}
2117
/* Channel scheduler: across all connections of @type, pick the channel
 * whose head skb has the highest priority; among equal priorities,
 * prefer the channel whose CONNECTION has sent the least (fairness).
 * Computes a TX quota from the controller's free buffers, split over
 * the number of contending channels.  Returns NULL when no channel has
 * queued data.
 * NOTE(review): 'min = ~0' is a signed int compared against the
 * unsigned conn->sent; this works via int->unsigned promotion but
 * would be clearer as unsigned. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority counts. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness is per connection, not per channel. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen - stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	/* Free controller buffers for the chosen link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split buffers evenly; guarantee progress with at least 1. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2195
/* Anti-starvation pass run after a TX round: for every connection of
 * @type, reset the per-round 'sent' counter of channels that got
 * service, and promote the head skb of channels that did NOT get
 * service to priority HCI_PRIO_MAX - 1 so they win the next round. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: just clear the
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen - stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
2242
/* Drain ACL channels: detect a stalled link first, then repeatedly
 * pick the best channel (hci_chan_sent) and send up to its quota of
 * same-or-higher-priority packets while controller buffers remain.
 * Finishes with a priority recalculation if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Snapshot to detect below whether anything was sent. */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2292
2293/* Schedule SCO */
2294static inline void hci_sched_sco(struct hci_dev *hdev)
2295{
2296 struct hci_conn *conn;
2297 struct sk_buff *skb;
2298 int quote;
2299
2300 BT_DBG("%s", hdev->name);
2301
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002302 if (!hci_conn_num(hdev, SCO_LINK))
2303 return;
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2306 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2307 BT_DBG("skb %p len %d", skb, skb->len);
2308 hci_send_frame(skb);
2309
2310 conn->sent++;
2311 if (conn->sent == ~0)
2312 conn->sent = 0;
2313 }
2314 }
2315}
2316
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002317static inline void hci_sched_esco(struct hci_dev *hdev)
2318{
2319 struct hci_conn *conn;
2320 struct sk_buff *skb;
2321 int quote;
2322
2323 BT_DBG("%s", hdev->name);
2324
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002325 if (!hci_conn_num(hdev, ESCO_LINK))
2326 return;
2327
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002328 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2329 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2330 BT_DBG("skb %p len %d", skb, skb->len);
2331 hci_send_frame(skb);
2332
2333 conn->sent++;
2334 if (conn->sent == ~0)
2335 conn->sent = 0;
2336 }
2337 }
2338}
2339
/* Drain LE channels, mirroring hci_sched_acl: stall detection, then
 * quota-bounded sends per best channel.  Controllers without a
 * dedicated LE buffer pool (le_pkts == 0) borrow ACL buffers, so the
 * remaining count is written back to the matching counter at the
 * end. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Snapshot to detect below whether anything was sent. */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2390
/* TX tasklet body: run every per-link-type scheduler, then flush raw
 * (unknown type) packets straight to the driver.  hci_task_lock is
 * held for reading so protocol (un)registration cannot race with the
 * schedulers. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2417
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002418/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419
/* ACL data packet */
/* Handle one inbound ACL packet: strip the ACL header, look up the
 * connection by handle and pass the payload to the L2CAP layer.  The
 * skb is consumed here only if no upper protocol takes it (or the
 * handle is unknown); otherwise ownership passes to recv_acldata(). */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2459
/* SCO data packet */
/* Handle one inbound SCO packet: strip the SCO header, look up the
 * connection by handle and pass the payload to the SCO layer.  The
 * skb is consumed here only if no upper protocol takes it (or the
 * handle is unknown); otherwise ownership passes to recv_scodata(). */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2495
/* RX tasklet body: drain rx_q, mirroring packets to promiscuous
 * sockets, dropping everything in raw mode, filtering data packets
 * during init, and finally dispatching by packet type. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: user space handles everything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * only events are expected during init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2550
/* Command tasklet body: if the controller has a free command credit,
 * dequeue the next command, keep a clone in hdev->sent_cmd for
 * hci_sent_cmd_data() matching, send it and (re)arm the command
 * timeout.  On clone failure the command is requeued and the tasklet
 * rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* HCI_Reset gets no timeout; the controller may
			 * take arbitrarily long to come back. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002581
2582int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2583{
2584 /* General inquiry access code (GIAC) */
2585 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2586 struct hci_cp_inquiry cp;
2587
2588 BT_DBG("%s", hdev->name);
2589
2590 if (test_bit(HCI_INQUIRY, &hdev->flags))
2591 return -EINPROGRESS;
2592
2593 memset(&cp, 0, sizeof(cp));
2594 memcpy(&cp.lap, lap, sizeof(cp.lap));
2595 cp.length = length;
2596
2597 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2598}
Andre Guedes023d5042011-11-04 14:16:52 -03002599
/* Abort a running inquiry.  Returns -EPERM when no inquiry is in
 * progress, otherwise the result of queuing the
 * HCI_OP_INQUIRY_CANCEL command. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}