blob: ec1019178f8072aa77301b3ecf97bdebf0ac4b6a [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (ms) before an auto-powered-on controller is powered off again. */
#define AUTO_OFF_TIMEOUT 2000

/* Module parameter: expose non-BR/EDR (AMP/high-speed) controllers. */
int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Serializes the RX/TX/cmd tasks against proto (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */
82int hci_register_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
87int hci_unregister_notifier(struct notifier_block *nb)
88{
Alan Sterne041c682006-03-27 01:16:30 -080089 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
Marcel Holtmann65164552005-10-28 19:20:48 +020092static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070093{
Alan Sterne041c682006-03-27 01:16:30 -080094 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070095}
96
97/* ---- HCI requests ---- */
98
Johan Hedberg23bb5762010-12-21 23:01:27 +020099void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700100{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200101 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
102
Johan Hedberga5040ef2011-01-10 13:28:59 +0200103 /* If this is the init phase check if the completed command matches
104 * the last init command, and if not just return.
105 */
106 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200107 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = result;
111 hdev->req_status = HCI_REQ_DONE;
112 wake_up_interruptible(&hdev->req_wait_q);
113 }
114}
115
116static void hci_req_cancel(struct hci_dev *hdev, int err)
117{
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125}
126
127/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900128static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100129 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700130{
131 DECLARE_WAITQUEUE(wait, current);
132 int err = 0;
133
134 BT_DBG("%s start", hdev->name);
135
136 hdev->req_status = HCI_REQ_PEND;
137
138 add_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_INTERRUPTIBLE);
140
141 req(hdev, opt);
142 schedule_timeout(timeout);
143
144 remove_wait_queue(&hdev->req_wait_q, &wait);
145
146 if (signal_pending(current))
147 return -EINTR;
148
149 switch (hdev->req_status) {
150 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700151 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 break;
153
154 case HCI_REQ_CANCELED:
155 err = -hdev->req_result;
156 break;
157
158 default:
159 err = -ETIMEDOUT;
160 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700161 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162
Johan Hedberga5040ef2011-01-10 13:28:59 +0200163 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164
165 BT_DBG("%s end: err %d", hdev->name, err);
166
167 return err;
168}
169
170static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100171 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700172{
173 int ret;
174
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200175 if (!test_bit(HCI_UP, &hdev->flags))
176 return -ENETDOWN;
177
Linus Torvalds1da177e2005-04-16 15:20:36 -0700178 /* Serialize all requests */
179 hci_req_lock(hdev);
180 ret = __hci_request(hdev, req, opt, timeout);
181 hci_req_unlock(hdev);
182
183 return ret;
184}
185
186static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
187{
188 BT_DBG("%s %ld", hdev->name, opt);
189
190 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300191 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200192 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700193}
194
195static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
196{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200197 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800199 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200200 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201
202 BT_DBG("%s %ld", hdev->name, opt);
203
204 /* Driver initialization */
205
206 /* Special commands */
207 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700208 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100210
Linus Torvalds1da177e2005-04-16 15:20:36 -0700211 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100212 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700213 }
214 skb_queue_purge(&hdev->driver_init);
215
216 /* Mandatory initialization */
217
218 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300219 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
220 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200221 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300222 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
224 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200227 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200234 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
235
236 /* Read Class of Device */
237 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
238
239 /* Read Local Name */
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241
242 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
245 /* Optional initialization */
246
247 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200248 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200249 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200253 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200254
255 bacpy(&cp.bdaddr, BDADDR_ANY);
256 cp.delete_all = 1;
257 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700258}
259
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300260static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
261{
262 BT_DBG("%s", hdev->name);
263
264 /* Read LE buffer size */
265 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
266}
267
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
269{
270 __u8 scan = opt;
271
272 BT_DBG("%s %x", hdev->name, scan);
273
274 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200275 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700276}
277
278static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
279{
280 __u8 auth = opt;
281
282 BT_DBG("%s %x", hdev->name, auth);
283
284 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200285 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286}
287
288static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
289{
290 __u8 encrypt = opt;
291
292 BT_DBG("%s %x", hdev->name, encrypt);
293
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200294 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200295 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296}
297
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200298static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __le16 policy = cpu_to_le16(opt);
301
Marcel Holtmanna418b892008-11-30 12:17:28 +0100302 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200303
304 /* Default link policy */
305 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
306}
307
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900308/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309 * Device is held on return. */
310struct hci_dev *hci_dev_get(int index)
311{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200312 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313
314 BT_DBG("%d", index);
315
316 if (index < 0)
317 return NULL;
318
319 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200320 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 if (d->id == index) {
322 hdev = hci_dev_hold(d);
323 break;
324 }
325 }
326 read_unlock(&hci_dev_list_lock);
327 return hdev;
328}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330/* ---- Inquiry support ---- */
331static void inquiry_cache_flush(struct hci_dev *hdev)
332{
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
335
336 BT_DBG("cache %p", cache);
337
338 cache->list = NULL;
339 while ((e = next)) {
340 next = e->next;
341 kfree(e);
342 }
343}
344
345struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346{
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
349
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
354 break;
355 return e;
356}
357
358void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
359{
360 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200361 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362
363 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
364
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200365 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
366 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200368 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
369 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200371
372 ie->next = cache->list;
373 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374 }
375
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200376 memcpy(&ie->data, data, sizeof(*data));
377 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 cache->timestamp = jiffies;
379}
380
381static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
382{
383 struct inquiry_cache *cache = &hdev->inq_cache;
384 struct inquiry_info *info = (struct inquiry_info *) buf;
385 struct inquiry_entry *e;
386 int copied = 0;
387
388 for (e = cache->list; e && copied < num; e = e->next, copied++) {
389 struct inquiry_data *data = &e->data;
390 bacpy(&info->bdaddr, &data->bdaddr);
391 info->pscan_rep_mode = data->pscan_rep_mode;
392 info->pscan_period_mode = data->pscan_period_mode;
393 info->pscan_mode = data->pscan_mode;
394 memcpy(info->dev_class, data->dev_class, 3);
395 info->clock_offset = data->clock_offset;
396 info++;
397 }
398
399 BT_DBG("cache %p, copied %d", cache, copied);
400 return copied;
401}
402
403static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
404{
405 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
406 struct hci_cp_inquiry cp;
407
408 BT_DBG("%s", hdev->name);
409
410 if (test_bit(HCI_INQUIRY, &hdev->flags))
411 return;
412
413 /* Start Inquiry */
414 memcpy(&cp.lap, &ir->lap, 3);
415 cp.length = ir->length;
416 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200417 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418}
419
420int hci_inquiry(void __user *arg)
421{
422 __u8 __user *ptr = arg;
423 struct hci_inquiry_req ir;
424 struct hci_dev *hdev;
425 int err = 0, do_inquiry = 0, max_rsp;
426 long timeo;
427 __u8 *buf;
428
429 if (copy_from_user(&ir, ptr, sizeof(ir)))
430 return -EFAULT;
431
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200432 hdev = hci_dev_get(ir.dev_id);
433 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700434 return -ENODEV;
435
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300436 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900437 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200438 inquiry_cache_empty(hdev) ||
439 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700440 inquiry_cache_flush(hdev);
441 do_inquiry = 1;
442 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300443 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444
Marcel Holtmann04837f62006-07-03 10:02:33 +0200445 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200446
447 if (do_inquiry) {
448 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
449 if (err < 0)
450 goto done;
451 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452
453 /* for unlimited number of responses we will use buffer with 255 entries */
454 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
455
456 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
457 * copy it to the user space.
458 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100459 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200460 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461 err = -ENOMEM;
462 goto done;
463 }
464
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300465 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300467 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700468
469 BT_DBG("num_rsp %d", ir.num_rsp);
470
471 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
472 ptr += sizeof(ir);
473 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
474 ir.num_rsp))
475 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900476 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477 err = -EFAULT;
478
479 kfree(buf);
480
481done:
482 hci_dev_put(hdev);
483 return err;
484}
485
486/* ---- HCI ioctl helpers ---- */
487
488int hci_dev_open(__u16 dev)
489{
490 struct hci_dev *hdev;
491 int ret = 0;
492
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200493 hdev = hci_dev_get(dev);
494 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 return -ENODEV;
496
497 BT_DBG("%s %p", hdev->name, hdev);
498
499 hci_req_lock(hdev);
500
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200501 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
502 ret = -ERFKILL;
503 goto done;
504 }
505
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 if (test_bit(HCI_UP, &hdev->flags)) {
507 ret = -EALREADY;
508 goto done;
509 }
510
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
512 set_bit(HCI_RAW, &hdev->flags);
513
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200514 /* Treat all non BR/EDR controllers as raw devices if
515 enable_hs is not set */
516 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100517 set_bit(HCI_RAW, &hdev->flags);
518
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519 if (hdev->open(hdev)) {
520 ret = -EIO;
521 goto done;
522 }
523
524 if (!test_bit(HCI_RAW, &hdev->flags)) {
525 atomic_set(&hdev->cmd_cnt, 1);
526 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200527 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
Marcel Holtmann04837f62006-07-03 10:02:33 +0200529 ret = __hci_request(hdev, hci_init_req, 0,
530 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531
Andre Guedeseead27d2011-06-30 19:20:55 -0300532 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300533 ret = __hci_request(hdev, hci_le_init_req, 0,
534 msecs_to_jiffies(HCI_INIT_TIMEOUT));
535
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 clear_bit(HCI_INIT, &hdev->flags);
537 }
538
539 if (!ret) {
540 hci_dev_hold(hdev);
541 set_bit(HCI_UP, &hdev->flags);
542 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200543 if (!test_bit(HCI_SETUP, &hdev->flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300544 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200545 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300546 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200547 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900548 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700549 /* Init failed, cleanup */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 tasklet_kill(&hdev->tx_task);
551 tasklet_kill(&hdev->cmd_task);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400552 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553
554 skb_queue_purge(&hdev->cmd_q);
555 skb_queue_purge(&hdev->rx_q);
556
557 if (hdev->flush)
558 hdev->flush(hdev);
559
560 if (hdev->sent_cmd) {
561 kfree_skb(hdev->sent_cmd);
562 hdev->sent_cmd = NULL;
563 }
564
565 hdev->close(hdev);
566 hdev->flags = 0;
567 }
568
569done:
570 hci_req_unlock(hdev);
571 hci_dev_put(hdev);
572 return ret;
573}
574
575static int hci_dev_do_close(struct hci_dev *hdev)
576{
577 BT_DBG("%s %p", hdev->name, hdev);
578
579 hci_req_cancel(hdev, ENODEV);
580 hci_req_lock(hdev);
581
582 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300583 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 hci_req_unlock(hdev);
585 return 0;
586 }
587
588 /* Kill RX and TX tasks */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 tasklet_kill(&hdev->tx_task);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400590 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200592 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200593 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200594 hdev->discov_timeout = 0;
595 }
596
Johan Hedberg32435532011-11-07 22:16:04 +0200597 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200598 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200599
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300600 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601 inquiry_cache_flush(hdev);
602 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300603 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604
605 hci_notify(hdev, HCI_DEV_DOWN);
606
607 if (hdev->flush)
608 hdev->flush(hdev);
609
610 /* Reset device */
611 skb_queue_purge(&hdev->cmd_q);
612 atomic_set(&hdev->cmd_cnt, 1);
613 if (!test_bit(HCI_RAW, &hdev->flags)) {
614 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200615 __hci_request(hdev, hci_reset_req, 0,
Szymon Janc43611a72011-10-17 23:05:49 +0200616 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617 clear_bit(HCI_INIT, &hdev->flags);
618 }
619
620 /* Kill cmd task */
621 tasklet_kill(&hdev->cmd_task);
622
623 /* Drop queues */
624 skb_queue_purge(&hdev->rx_q);
625 skb_queue_purge(&hdev->cmd_q);
626 skb_queue_purge(&hdev->raw_q);
627
628 /* Drop last sent command */
629 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300630 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 kfree_skb(hdev->sent_cmd);
632 hdev->sent_cmd = NULL;
633 }
634
635 /* After this point our queues are empty
636 * and no tasks are scheduled. */
637 hdev->close(hdev);
638
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300639 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200640 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300641 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200642
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 /* Clear flags */
644 hdev->flags = 0;
645
646 hci_req_unlock(hdev);
647
648 hci_dev_put(hdev);
649 return 0;
650}
651
652int hci_dev_close(__u16 dev)
653{
654 struct hci_dev *hdev;
655 int err;
656
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200657 hdev = hci_dev_get(dev);
658 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 return -ENODEV;
660 err = hci_dev_do_close(hdev);
661 hci_dev_put(hdev);
662 return err;
663}
664
665int hci_dev_reset(__u16 dev)
666{
667 struct hci_dev *hdev;
668 int ret = 0;
669
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200670 hdev = hci_dev_get(dev);
671 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700672 return -ENODEV;
673
674 hci_req_lock(hdev);
675 tasklet_disable(&hdev->tx_task);
676
677 if (!test_bit(HCI_UP, &hdev->flags))
678 goto done;
679
680 /* Drop queues */
681 skb_queue_purge(&hdev->rx_q);
682 skb_queue_purge(&hdev->cmd_q);
683
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300684 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700685 inquiry_cache_flush(hdev);
686 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300687 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
689 if (hdev->flush)
690 hdev->flush(hdev);
691
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900692 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300693 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694
695 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200696 ret = __hci_request(hdev, hci_reset_req, 0,
697 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
699done:
700 tasklet_enable(&hdev->tx_task);
701 hci_req_unlock(hdev);
702 hci_dev_put(hdev);
703 return ret;
704}
705
706int hci_dev_reset_stat(__u16 dev)
707{
708 struct hci_dev *hdev;
709 int ret = 0;
710
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200711 hdev = hci_dev_get(dev);
712 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 return -ENODEV;
714
715 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
716
717 hci_dev_put(hdev);
718
719 return ret;
720}
721
722int hci_dev_cmd(unsigned int cmd, void __user *arg)
723{
724 struct hci_dev *hdev;
725 struct hci_dev_req dr;
726 int err = 0;
727
728 if (copy_from_user(&dr, arg, sizeof(dr)))
729 return -EFAULT;
730
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200731 hdev = hci_dev_get(dr.dev_id);
732 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 return -ENODEV;
734
735 switch (cmd) {
736 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200737 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
738 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 break;
740
741 case HCISETENCRYPT:
742 if (!lmp_encrypt_capable(hdev)) {
743 err = -EOPNOTSUPP;
744 break;
745 }
746
747 if (!test_bit(HCI_AUTH, &hdev->flags)) {
748 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200749 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
750 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700751 if (err)
752 break;
753 }
754
Marcel Holtmann04837f62006-07-03 10:02:33 +0200755 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
756 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 break;
758
759 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200760 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
761 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 break;
763
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200764 case HCISETLINKPOL:
765 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
766 msecs_to_jiffies(HCI_INIT_TIMEOUT));
767 break;
768
769 case HCISETLINKMODE:
770 hdev->link_mode = ((__u16) dr.dev_opt) &
771 (HCI_LM_MASTER | HCI_LM_ACCEPT);
772 break;
773
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774 case HCISETPTYPE:
775 hdev->pkt_type = (__u16) dr.dev_opt;
776 break;
777
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200779 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
780 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 break;
782
783 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200784 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
785 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786 break;
787
788 default:
789 err = -EINVAL;
790 break;
791 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200792
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793 hci_dev_put(hdev);
794 return err;
795}
796
/* IOCTL helper: copy the list of registered HCI devices to userspace.
 * Userspace passes a __u16 device count followed by room for that many
 * struct hci_dev_req entries; we fill in up to dev_num (id, flags)
 * pairs and write back the count actually used.
 * Returns 0 on success or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays within two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as activity: abort the pending
		 * automatic power-off for devices still in HCI_AUTO_OFF */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not under mgmt control stay pairable by default
		 * for legacy ioctl users */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the header plus the entries actually filled in */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
843
/* IOCTL helper: fill a struct hci_dev_info for the device id supplied
 * by userspace in *arg and copy it back.
 * Returns 0 on success or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: abort a pending
	 * automatic power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not under mgmt control stay pairable by default */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Bus type in the low nibble, device type in the high nibble */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
885
886/* ---- Interface to HCI drivers ---- */
887
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200888static int hci_rfkill_set_block(void *data, bool blocked)
889{
890 struct hci_dev *hdev = data;
891
892 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
893
894 if (!blocked)
895 return 0;
896
897 hci_dev_do_close(hdev);
898
899 return 0;
900}
901
/* rfkill callbacks: only the block operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
905
/* Alloc HCI device */
/* Allocate a zeroed hci_dev and perform the minimal setup (sysfs state,
 * driver_init queue) needed before hci_register_dev(). Returns NULL on
 * allocation failure. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
921
/* Free HCI device */
/* Counterpart of hci_alloc_dev(): drop queued driver-init frames and
 * release the last device reference; the actual kfree happens in the
 * struct device release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
931
/* Deferred work: power on a freshly registered controller. If the
 * device is in the automatic power-on path (HCI_AUTO_OFF), arm the
 * automatic power-off timeout; once initial setup completes (HCI_SETUP
 * cleared here) notify mgmt of the new controller index. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
948
/* Delayed work: automatically power the controller back off when
 * nothing claimed it within AUTO_OFF_TIMEOUT. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
960
/* Delayed work: the discoverable timeout expired. Re-enable page scan
 * only (SCAN_PAGE), so the device stays connectable but is no longer
 * discoverable, and reset the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
978
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200979int hci_uuids_clear(struct hci_dev *hdev)
980{
981 struct list_head *p, *n;
982
983 list_for_each_safe(p, n, &hdev->uuids) {
984 struct bt_uuid *uuid;
985
986 uuid = list_entry(p, struct bt_uuid, list);
987
988 list_del(p);
989 kfree(uuid);
990 }
991
992 return 0;
993}
994
Johan Hedberg55ed8ca12011-01-17 14:41:05 +0200995int hci_link_keys_clear(struct hci_dev *hdev)
996{
997 struct list_head *p, *n;
998
999 list_for_each_safe(p, n, &hdev->link_keys) {
1000 struct link_key *key;
1001
1002 key = list_entry(p, struct link_key, list);
1003
1004 list_del(p);
1005 kfree(key);
1006 }
1007
1008 return 0;
1009}
1010
1011struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1012{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001013 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001014
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001015 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001016 if (bacmp(bdaddr, &k->bdaddr) == 0)
1017 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001018
1019 return NULL;
1020}
1021
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to keep the key, 0 to drop it once notified. The order of
 * the checks matters: the key class (legacy / debug / changed
 * combination) is classified before the bonding requirements of the
 * two sides are consulted. conn may be NULL (no connection context). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1057
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001058struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1059{
1060 struct link_key *k;
1061
1062 list_for_each_entry(k, &hdev->link_keys, list) {
1063 struct key_master_id *id;
1064
1065 if (k->type != HCI_LK_SMP_LTK)
1066 continue;
1067
1068 if (k->dlen != sizeof(*id))
1069 continue;
1070
1071 id = (void *) &k->data;
1072 if (id->ediv == ediv &&
1073 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1074 return k;
1075 }
1076
1077 return NULL;
1078}
1079EXPORT_SYMBOL(hci_find_ltk);
1080
1081struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1082 bdaddr_t *bdaddr, u8 type)
1083{
1084 struct link_key *k;
1085
1086 list_for_each_entry(k, &hdev->link_keys, list)
1087 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1088 return k;
1089
1090 return NULL;
1091}
1092EXPORT_SYMBOL(hci_find_link_key_type);
1093
/* Store (or update) a link key for bdaddr. conn may be NULL (e.g.
 * security mode 3, no connection context). new_key is set when the
 * controller just generated the key (as opposed to it being loaded);
 * only then is mgmt notified, and a key judged non-persistent is
 * discarded again after notification.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event does not alter the key class;
	 * keep the previously known type in that case */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1148
/* Store (or update) an SMP Long Term Key for bdaddr. LTKs live in the
 * generic link-key list with type HCI_LK_SMP_LTK; the ediv/rand pair is
 * kept in the variable-length data area and key_size is stashed in the
 * pin_len field. Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		/* NOTE(review): the third argument of mgmt_new_link_key()
		 * is a persistent flag in hci_add_link_key(), but here the
		 * old key type is passed - verify this is intentional */
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1186
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001187int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1188{
1189 struct link_key *key;
1190
1191 key = hci_find_link_key(hdev, bdaddr);
1192 if (!key)
1193 return -ENOENT;
1194
1195 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1196
1197 list_del(&key->list);
1198 kfree(key);
1199
1200 return 0;
1201}
1202
/* HCI command timer function */
/* Fires when a sent HCI command received no completion in time: restore
 * the single command credit so the queue does not stall forever, and
 * kick the command tasklet to send the next queued command. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1212
Szymon Janc2763eda2011-03-22 13:12:22 +01001213struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1214 bdaddr_t *bdaddr)
1215{
1216 struct oob_data *data;
1217
1218 list_for_each_entry(data, &hdev->remote_oob_data, list)
1219 if (bacmp(bdaddr, &data->bdaddr) == 0)
1220 return data;
1221
1222 return NULL;
1223}
1224
1225int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1226{
1227 struct oob_data *data;
1228
1229 data = hci_find_remote_oob_data(hdev, bdaddr);
1230 if (!data)
1231 return -ENOENT;
1232
1233 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1234
1235 list_del(&data->list);
1236 kfree(data);
1237
1238 return 0;
1239}
1240
/* Drop all stored remote out-of-band pairing data. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1252
/* Store (or refresh) the OOB hash/randomizer pair received for bdaddr.
 * An existing entry for the address is reused; otherwise a new one is
 * allocated. Returns 0 on success or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* All fields are overwritten below, so the kmalloc above need
	 * not be zeroed */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1276
Antti Julkub2a66aa2011-06-15 12:01:14 +03001277struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1278 bdaddr_t *bdaddr)
1279{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001280 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001281
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001282 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001283 if (bacmp(bdaddr, &b->bdaddr) == 0)
1284 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001285
1286 return NULL;
1287}
1288
1289int hci_blacklist_clear(struct hci_dev *hdev)
1290{
1291 struct list_head *p, *n;
1292
1293 list_for_each_safe(p, n, &hdev->blacklist) {
1294 struct bdaddr_list *b;
1295
1296 b = list_entry(p, struct bdaddr_list, list);
1297
1298 list_del(p);
1299 kfree(b);
1300 }
1301
1302 return 0;
1303}
1304
/* Add bdaddr to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already present, -ENOMEM
 * on allocation failure, otherwise the mgmt notification result.
 * NOTE(review): list access appears unsynchronized here - presumably
 * callers hold hdev->lock (as hci_unregister_dev does for the clear
 * path); verify against callers. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1325
/* Remove bdaddr from the blacklist and notify mgmt. Passing BDADDR_ANY
 * clears the entire list instead.
 * Returns -ENOENT if the address is not blacklisted, otherwise the
 * mgmt notification result (or the clear result for BDADDR_ANY). */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1342
/* Timer callback (hdev->adv_timer): expire the LE advertising cache by
 * dropping all collected advertising entries under the device lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1353
/* Free every cached LE advertising entry. Always returns 0. */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1367
1368struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369{
1370 struct adv_entry *entry;
1371
1372 list_for_each_entry(entry, &hdev->adv_entries, list)
1373 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1374 return entry;
1375
1376 return NULL;
1377}
1378
1379static inline int is_connectable_adv(u8 evt_type)
1380{
1381 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1382 return 1;
1383
1384 return 0;
1385}
1386
/* Cache the sender of a connectable LE advertising report.
 * Returns 0 on success (including the duplicate case), -EINVAL for a
 * non-connectable event type, or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1414
/* Register HCI device */
/* Allocate an index, name the device "hci<id>", initialize all per-
 * device state (queues, timers, work items, lists) and hook it into
 * sysfs/rfkill. The device is scheduled to power on automatically with
 * HCI_AUTO_OFF and HCI_SETUP set.
 * Returns the assigned id (>= 0) or a negative errno; on error the
 * device is unlinked from the global list again. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert at the position found above so ids stay sorted */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);

	tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal: the device simply has no rfkill
	 * switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on asynchronously; hci_power_on() clears
	 * HCI_SETUP and arms the HCI_AUTO_OFF timeout */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1542
/* Unregister HCI device */
/* Tear down a registered device: unlink it from the global list, close
 * it, notify mgmt (unless still initializing/in setup), release rfkill,
 * sysfs and the workqueue, free all cached state, and drop the
 * registration reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1594
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND through hci_notify(). Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1602
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME through hci_notify(). Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1610
/* Receive frame from HCI drivers */
/* Takes ownership of skb (skb->dev must point at the hci_dev). The
 * frame is stamped and queued for the rx work item; it is dropped with
 * -ENXIO unless the device is up or being initialized. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Hand off to process context via the rx work item */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1633
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream. A partially received packet is parked in
 * hdev->reassembly[index]; once its header-declared length has been
 * accumulated the completed skb is handed to hci_recv_frame().
 * Returns the number of input bytes not yet consumed (>= 0), -EILSEQ
 * for an invalid type/index, or -ENOMEM. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame for
		 * this type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if the packet claims more than the
		 * allocated maximum */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1742
/* Driver entry point: feed a fragment of packets of one known type into
 * reassembly (slot chosen by type). Loops until all input is consumed
 * or an error occurs. Returns remaining byte count (>= 0) or a
 * negative errno. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1762
/* All stream input shares a single reassembly slot */
#define STREAM_REASSEMBLY 0

/* Driver entry point for stream transports: the byte stream consists of
 * a packet-type indicator byte followed by the packet itself. The type
 * is read when no packet is in progress, then the data is fed into
 * reassembly slot STREAM_REASSEMBLY. Returns remaining byte count
 * (>= 0) or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1797
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798/* ---- Interface to upper protocols ---- */
1799
1800/* Register/Unregister protocols.
1801 * hci_task_lock is used to ensure that no tasks are running. */
1802int hci_register_proto(struct hci_proto *hp)
1803{
1804 int err = 0;
1805
1806 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808 if (hp->id >= HCI_MAX_PROTO)
1809 return -EINVAL;
1810
1811 write_lock_bh(&hci_task_lock);
1812
1813 if (!hci_proto[hp->id])
1814 hci_proto[hp->id] = hp;
1815 else
1816 err = -EEXIST;
1817
1818 write_unlock_bh(&hci_task_lock);
1819
1820 return err;
1821}
1822EXPORT_SYMBOL(hci_register_proto);
1823
1824int hci_unregister_proto(struct hci_proto *hp)
1825{
1826 int err = 0;
1827
1828 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829
1830 if (hp->id >= HCI_MAX_PROTO)
1831 return -EINVAL;
1832
1833 write_lock_bh(&hci_task_lock);
1834
1835 if (hci_proto[hp->id])
1836 hci_proto[hp->id] = NULL;
1837 else
1838 err = -ENOENT;
1839
1840 write_unlock_bh(&hci_task_lock);
1841
1842 return err;
1843}
1844EXPORT_SYMBOL(hci_unregister_proto);
1845
/* Add a callback structure to the global HCI callback list.
 * Writers are serialised by hci_cb_list_lock; always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1857
/* Remove a callback structure from the global HCI callback list.
 * Counterpart of hci_register_cb(); always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1869
/* Hand one complete HCI packet to the transport driver.  The target
 * device is carried in skb->dev.  On the no-device path the skb is
 * freed here; otherwise ownership passes to the driver's ->send(). */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Mirror outgoing traffic to monitoring sockets when someone
	 * has the device open in promiscuous mode */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1893
1894/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001895int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896{
1897 int len = HCI_COMMAND_HDR_SIZE + plen;
1898 struct hci_command_hdr *hdr;
1899 struct sk_buff *skb;
1900
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001901 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
1903 skb = bt_skb_alloc(len, GFP_ATOMIC);
1904 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001905 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 return -ENOMEM;
1907 }
1908
1909 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001910 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 hdr->plen = plen;
1912
1913 if (plen)
1914 memcpy(skb_put(skb, plen), param, plen);
1915
1916 BT_DBG("skb len %d", skb->len);
1917
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001918 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001920
Johan Hedberga5040ef2011-01-10 13:28:59 +02001921 if (test_bit(HCI_INIT, &hdev->flags))
1922 hdev->init_last_cmd = opcode;
1923
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001925 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
1927 return 0;
1928}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001931void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932{
1933 struct hci_command_hdr *hdr;
1934
1935 if (!hdev->sent_cmd)
1936 return NULL;
1937
1938 hdr = (void *) hdev->sent_cmd->data;
1939
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001940 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 return NULL;
1942
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001943 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944
1945 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946}
1947
1948/* Send ACL data */
/* Prepend an ACL data header to skb.  The payload length is captured
 * before skb_push() so dlen reflects the data only, not the header.
 * handle and flags are packed into one little-endian 16-bit field. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1960
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001961static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1962 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963{
1964 struct hci_dev *hdev = conn->hdev;
1965 struct sk_buff *list;
1966
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001967 list = skb_shinfo(skb)->frag_list;
1968 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 /* Non fragmented */
1970 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1971
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001972 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 } else {
1974 /* Fragmented */
1975 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1976
1977 skb_shinfo(skb)->frag_list = NULL;
1978
1979 /* Queue all fragments atomically */
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001980 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001982 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001983
1984 flags &= ~ACL_START;
1985 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 do {
1987 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001988
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001990 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02001991 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992
1993 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1994
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001995 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 } while (list);
1997
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001998 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002000}
2001
/* Submit an ACL data skb on a channel: tag it, add the ACL header for
 * the first fragment, queue it on the channel's data queue, and kick
 * the TX tasklet to schedule transmission. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	/* Fragments (if any) are handled and queued inside */
	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2018
2019/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002020void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021{
2022 struct hci_dev *hdev = conn->hdev;
2023 struct hci_sco_hdr hdr;
2024
2025 BT_DBG("%s len %d", hdev->name, skb->len);
2026
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002027 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 hdr.dlen = skb->len;
2029
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002030 skb_push(skb, HCI_SCO_HDR_SIZE);
2031 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002032 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033
2034 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002035 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002038 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039}
2040EXPORT_SYMBOL(hci_send_sco);
2041
2042/* ---- HCI TX task (outgoing data) ---- */
2043
2044/* HCI Connection scheduler */
/* Connection scheduler: pick the connection of the given link type with
 * queued data and the fewest in-flight packets (fairness by least
 * 'sent'), and compute its transmit quota as an even share of the
 * controller's free buffer count (at least 1).  *quote is set to 0 when
 * nothing is eligible. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only consider connections able to carry data */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split buffers evenly; guarantee forward progress */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2098
Ville Tervobae1f5d92011-02-10 22:38:53 -03002099static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100{
2101 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002102 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
Ville Tervobae1f5d92011-02-10 22:38:53 -03002104 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
2106 /* Kill stalled connections */
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002107 list_for_each_entry(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002108 if (c->type == type && c->sent) {
2109 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 hdev->name, batostr(&c->dst));
2111 hci_acl_disconn(c, 0x13);
2112 }
2113 }
2114}
2115
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002116static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2117 int *quote)
2118{
2119 struct hci_conn_hash *h = &hdev->conn_hash;
2120 struct hci_chan *chan = NULL;
2121 int num = 0, min = ~0, cur_prio = 0;
2122 struct hci_conn *conn;
2123 int cnt, q, conn_num = 0;
2124
2125 BT_DBG("%s", hdev->name);
2126
2127 list_for_each_entry(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002128 struct hci_chan *tmp;
2129
2130 if (conn->type != type)
2131 continue;
2132
2133 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2134 continue;
2135
2136 conn_num++;
2137
Gustavo F. Padovan2c33c062011-12-14 13:02:51 -02002138 list_for_each_entry(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002139 struct sk_buff *skb;
2140
2141 if (skb_queue_empty(&tmp->data_q))
2142 continue;
2143
2144 skb = skb_peek(&tmp->data_q);
2145 if (skb->priority < cur_prio)
2146 continue;
2147
2148 if (skb->priority > cur_prio) {
2149 num = 0;
2150 min = ~0;
2151 cur_prio = skb->priority;
2152 }
2153
2154 num++;
2155
2156 if (conn->sent < min) {
2157 min = conn->sent;
2158 chan = tmp;
2159 }
2160 }
2161
2162 if (hci_conn_num(hdev, type) == conn_num)
2163 break;
2164 }
2165
2166 if (!chan)
2167 return NULL;
2168
2169 switch (chan->conn->type) {
2170 case ACL_LINK:
2171 cnt = hdev->acl_cnt;
2172 break;
2173 case SCO_LINK:
2174 case ESCO_LINK:
2175 cnt = hdev->sco_cnt;
2176 break;
2177 case LE_LINK:
2178 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2179 break;
2180 default:
2181 cnt = 0;
2182 BT_ERR("Unknown link type");
2183 }
2184
2185 q = cnt / num;
2186 *quote = q ? q : 1;
2187 BT_DBG("chan %p quote %d", chan, *quote);
2188 return chan;
2189}
2190
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002191static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2192{
2193 struct hci_conn_hash *h = &hdev->conn_hash;
2194 struct hci_conn *conn;
2195 int num = 0;
2196
2197 BT_DBG("%s", hdev->name);
2198
2199 list_for_each_entry(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002200 struct hci_chan *chan;
2201
2202 if (conn->type != type)
2203 continue;
2204
2205 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2206 continue;
2207
2208 num++;
2209
Gustavo F. Padovan2c33c062011-12-14 13:02:51 -02002210 list_for_each_entry(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002211 struct sk_buff *skb;
2212
2213 if (chan->sent) {
2214 chan->sent = 0;
2215 continue;
2216 }
2217
2218 if (skb_queue_empty(&chan->data_q))
2219 continue;
2220
2221 skb = skb_peek(&chan->data_q);
2222 if (skb->priority >= HCI_PRIO_MAX - 1)
2223 continue;
2224
2225 skb->priority = HCI_PRIO_MAX - 1;
2226
2227 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2228 skb->priority);
2229 }
2230
2231 if (hci_conn_num(hdev, type) == num)
2232 break;
2233 }
2234}
2235
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236static inline void hci_sched_acl(struct hci_dev *hdev)
2237{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002238 struct hci_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239 struct sk_buff *skb;
2240 int quote;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002241 unsigned int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
2243 BT_DBG("%s", hdev->name);
2244
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002245 if (!hci_conn_num(hdev, ACL_LINK))
2246 return;
2247
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 if (!test_bit(HCI_RAW, &hdev->flags)) {
2249 /* ACL tx timeout must be longer than maximum
2250 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur824530212008-02-17 23:25:57 -08002251 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002252 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 }
2254
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002255 cnt = hdev->acl_cnt;
Marcel Holtmann04837f62006-07-03 10:02:33 +02002256
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002257 while (hdev->acl_cnt &&
2258 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002259 u32 priority = (skb_peek(&chan->data_q))->priority;
2260 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002261 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2262 skb->len, skb->priority);
2263
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002264 /* Stop if priority has changed */
2265 if (skb->priority < priority)
2266 break;
2267
2268 skb = skb_dequeue(&chan->data_q);
2269
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002270 hci_conn_enter_active_mode(chan->conn,
2271 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002272
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 hci_send_frame(skb);
2274 hdev->acl_last_tx = jiffies;
2275
2276 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002277 chan->sent++;
2278 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279 }
2280 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002281
2282 if (cnt != hdev->acl_cnt)
2283 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285
2286/* Schedule SCO */
/* SCO scheduler: round-robin over SCO connections via hci_low_sent()
 * while controller SCO buffers (sco_cnt) are available, sending up to
 * each connection's quota per round. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection counter before overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2309
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002310static inline void hci_sched_esco(struct hci_dev *hdev)
2311{
2312 struct hci_conn *conn;
2313 struct sk_buff *skb;
2314 int quote;
2315
2316 BT_DBG("%s", hdev->name);
2317
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002318 if (!hci_conn_num(hdev, ESCO_LINK))
2319 return;
2320
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002321 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2322 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2323 BT_DBG("skb %p len %d", skb, skb->len);
2324 hci_send_frame(skb);
2325
2326 conn->sent++;
2327 if (conn->sent == ~0)
2328 conn->sent = 0;
2329 }
2330 }
2331}
2332
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002333static inline void hci_sched_le(struct hci_dev *hdev)
2334{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002335 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002336 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002337 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002338
2339 BT_DBG("%s", hdev->name);
2340
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002341 if (!hci_conn_num(hdev, LE_LINK))
2342 return;
2343
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002344 if (!test_bit(HCI_RAW, &hdev->flags)) {
2345 /* LE tx timeout must be longer than maximum
2346 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002347 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002348 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002349 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002350 }
2351
2352 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002353 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002354 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002355 u32 priority = (skb_peek(&chan->data_q))->priority;
2356 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002357 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2358 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002359
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002360 /* Stop if priority has changed */
2361 if (skb->priority < priority)
2362 break;
2363
2364 skb = skb_dequeue(&chan->data_q);
2365
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002366 hci_send_frame(skb);
2367 hdev->le_last_tx = jiffies;
2368
2369 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002370 chan->sent++;
2371 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002372 }
2373 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002374
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002375 if (hdev->le_pkts)
2376 hdev->le_cnt = cnt;
2377 else
2378 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002379
2380 if (cnt != tmp)
2381 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002382}
2383
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384static void hci_tx_task(unsigned long arg)
2385{
2386 struct hci_dev *hdev = (struct hci_dev *) arg;
2387 struct sk_buff *skb;
2388
2389 read_lock(&hci_task_lock);
2390
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002391 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2392 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
2394 /* Schedule queues and send stuff to HCI driver */
2395
2396 hci_sched_acl(hdev);
2397
2398 hci_sched_sco(hdev);
2399
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002400 hci_sched_esco(hdev);
2401
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002402 hci_sched_le(hdev);
2403
Linus Torvalds1da177e2005-04-16 15:20:36 -07002404 /* Send next queued raw (unknown type) packet */
2405 while ((skb = skb_dequeue(&hdev->raw_q)))
2406 hci_send_frame(skb);
2407
2408 read_unlock(&hci_task_lock);
2409}
2410
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002411/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412
2413/* ACL data packet */
/* Process one incoming ACL data packet: strip the header, look up the
 * connection by handle, and pass the skb up to the L2CAP protocol
 * handler.  On the handler path skb ownership transfers upward (early
 * return); otherwise the skb is freed here. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Incoming data may pull the link out of sniff mode */
		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2452
2453/* SCO data packet */
/* Process one incoming SCO data packet: strip the header, look up the
 * connection by handle, and hand the skb to the SCO protocol handler.
 * Ownership transfers upward on that path; otherwise the skb is freed
 * here. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2488
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002489static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002490{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002491 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002492 struct sk_buff *skb;
2493
2494 BT_DBG("%s", hdev->name);
2495
2496 read_lock(&hci_task_lock);
2497
2498 while ((skb = skb_dequeue(&hdev->rx_q))) {
2499 if (atomic_read(&hdev->promisc)) {
2500 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002501 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 }
2503
2504 if (test_bit(HCI_RAW, &hdev->flags)) {
2505 kfree_skb(skb);
2506 continue;
2507 }
2508
2509 if (test_bit(HCI_INIT, &hdev->flags)) {
2510 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002511 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 case HCI_ACLDATA_PKT:
2513 case HCI_SCODATA_PKT:
2514 kfree_skb(skb);
2515 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002516 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002517 }
2518
2519 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002520 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002522 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 hci_event_packet(hdev, skb);
2524 break;
2525
2526 case HCI_ACLDATA_PKT:
2527 BT_DBG("%s ACL data packet", hdev->name);
2528 hci_acldata_packet(hdev, skb);
2529 break;
2530
2531 case HCI_SCODATA_PKT:
2532 BT_DBG("%s SCO data packet", hdev->name);
2533 hci_scodata_packet(hdev, skb);
2534 break;
2535
2536 default:
2537 kfree_skb(skb);
2538 break;
2539 }
2540 }
2541
2542 read_unlock(&hci_task_lock);
2543}
2544
/* Command tasklet body: if the controller has a free command slot
 * (cmd_cnt), take the next queued command, keep a clone in sent_cmd for
 * later matching by hci_sent_cmd_data(), send it, and (re)arm the
 * command timeout.  If cloning fails the command is requeued and the
 * tasklet rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command clone, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command completes during reset: stop the
			 * watchdog instead of rearming it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002575
2576int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2577{
2578 /* General inquiry access code (GIAC) */
2579 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2580 struct hci_cp_inquiry cp;
2581
2582 BT_DBG("%s", hdev->name);
2583
2584 if (test_bit(HCI_INQUIRY, &hdev->flags))
2585 return -EINPROGRESS;
2586
2587 memset(&cp, 0, sizeof(cp));
2588 memcpy(&cp.lap, lap, sizeof(cp.lap));
2589 cp.length = length;
2590
2591 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2592}
Andre Guedes023d50492011-11-04 14:16:52 -03002593
/* Cancel a running inquiry.  Returns -EPERM when no inquiry is active,
 * otherwise the result of queueing HCI_OP_INQUIRY_CANCEL. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002603
/* Runtime-tunable module parameter (mode 0644: world-readable,
 * root-writable) toggling High Speed support; enable_hs itself is
 * defined earlier in this file. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");