blob: f62ca1935f5a30f608cbef11ef9443a397ec4cae [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
/* Delay (in ms) before an auto-powered-on device is switched back off,
 * armed in hci_power_on() */
#define AUTO_OFF_TIMEOUT 2000

/* Forward declarations for the per-device tasklet handlers defined
 * later in this file */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* NOTE(review): presumably serializes the rx/tx/cmd tasklets against
 * protocol (un)registration - confirm against users below this chunk */
static DEFINE_RWLOCK(hci_task_lock);

/* SMP (Security Manager Protocol) enable flag; its use is not visible
 * in this chunk */
static int enable_smp;

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079
80/* ---- HCI notifications ---- */
81
82int hci_register_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
87int hci_unregister_notifier(struct notifier_block *nb)
88{
Alan Sterne041c682006-03-27 01:16:30 -080089 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
/* Broadcast an HCI device event to every registered notifier. The
 * atomic notifier variant is used, so callers need not be able to
 * sleep. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
96
97/* ---- HCI requests ---- */
98
/* Completion handler for synchronous HCI requests: record @result for
 * command @cmd and wake the waiter sleeping in __hci_request().
 * During the HCI_INIT phase only the completion of the last issued
 * init command (hdev->init_last_cmd) may finish the request.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
115
116static void hci_req_cancel(struct hci_dev *hdev, int err)
117{
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
119
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
124 }
125}
126
127/* Execute request and wait for completion. */
/* Execute an HCI request and wait (interruptibly) for its completion.
 *
 * Caller must hold the request lock (see hci_request()). @req queues
 * the HCI commands; hci_req_complete()/hci_req_cancel() finish the
 * request and wake us.
 *
 * Returns 0 on success, a negative errno translated from the HCI
 * status, -ETIMEDOUT after @timeout jiffies, or -EINTR on signal.
 *
 * NOTE(review): the signal path returns -EINTR while req_status is
 * still HCI_REQ_PEND, so a late completion may still write into
 * req_result - confirm callers tolerate this.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	/* Queue the commands, then sleep until completion or timeout */
	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status code into a negative errno */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Fails with -ENETDOWN when the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
185
/* Request callback: queue an HCI_Reset command, flagging the reset as
 * in flight via HCI_RESET. @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194
/* Request callback: queue the full controller initialization sequence.
 * Runs with HCI_INIT set, so each command completion is matched
 * against hdev->init_last_cmd (see hci_req_complete()). @opt unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain any driver-supplied init commands into
	 * the command queue first */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers with the no-reset quirk) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop every link key stored on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
271
/* Request callback: LE-specific initialization; run only for LE
 * capable controllers (see hci_dev_open()). @opt unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
279
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
281{
282 __u8 scan = opt;
283
284 BT_DBG("%s %x", hdev->name, scan);
285
286 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200287 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288}
289
290static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
291{
292 __u8 auth = opt;
293
294 BT_DBG("%s %x", hdev->name, auth);
295
296 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200297 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298}
299
300static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
301{
302 __u8 encrypt = opt;
303
304 BT_DBG("%s %x", hdev->name, encrypt);
305
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200306 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200307 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308}
309
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200310static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
311{
312 __le16 policy = cpu_to_le16(opt);
313
Marcel Holtmanna418b892008-11-30 12:17:28 +0100314 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315
316 /* Default link policy */
317 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
318}
319
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900320/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321 * Device is held on return. */
322struct hci_dev *hci_dev_get(int index)
323{
324 struct hci_dev *hdev = NULL;
325 struct list_head *p;
326
327 BT_DBG("%d", index);
328
329 if (index < 0)
330 return NULL;
331
332 read_lock(&hci_dev_list_lock);
333 list_for_each(p, &hci_dev_list) {
334 struct hci_dev *d = list_entry(p, struct hci_dev, list);
335 if (d->id == index) {
336 hdev = hci_dev_hold(d);
337 break;
338 }
339 }
340 read_unlock(&hci_dev_list_lock);
341 return hdev;
342}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344/* ---- Inquiry support ---- */
345static void inquiry_cache_flush(struct hci_dev *hdev)
346{
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *next = cache->list, *e;
349
350 BT_DBG("cache %p", cache);
351
352 cache->list = NULL;
353 while ((e = next)) {
354 next = e->next;
355 kfree(e);
356 }
357}
358
359struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
360{
361 struct inquiry_cache *cache = &hdev->inq_cache;
362 struct inquiry_entry *e;
363
364 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
365
366 for (e = cache->list; e; e = e->next)
367 if (!bacmp(&e->data.bdaddr, bdaddr))
368 break;
369 return e;
370}
371
/* Insert or refresh an inquiry result in the device's inquiry cache.
 * Existing entries (matched by bdaddr) are overwritten in place; new
 * entries are pushed at the head of the singly linked list. Allocation
 * failure silently drops the result (GFP_ATOMIC context). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
394
395static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
396{
397 struct inquiry_cache *cache = &hdev->inq_cache;
398 struct inquiry_info *info = (struct inquiry_info *) buf;
399 struct inquiry_entry *e;
400 int copied = 0;
401
402 for (e = cache->list; e && copied < num; e = e->next, copied++) {
403 struct inquiry_data *data = &e->data;
404 bacpy(&info->bdaddr, &data->bdaddr);
405 info->pscan_rep_mode = data->pscan_rep_mode;
406 info->pscan_period_mode = data->pscan_period_mode;
407 info->pscan_mode = data->pscan_mode;
408 memcpy(info->dev_class, data->dev_class, 3);
409 info->clock_offset = data->clock_offset;
410 info++;
411 }
412
413 BT_DBG("cache %p, copied %d", cache, copied);
414 return copied;
415}
416
/* Request callback: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via @opt. No-op when an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
433
/* HCIINQUIRY ioctl handler: run an inquiry (or reuse a fresh enough
 * cache) and copy the results back to user space as the request header
 * followed by an inquiry_info array.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a fresh inquiry when the cache is stale or empty,
	 * or the caller explicitly asked for a flush */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* Wait up to 2 seconds per requested inquiry length unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Header (with updated num_rsp) first, then the result array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
499
500/* ---- HCI ioctl helpers ---- */
501
/* Bring an HCI device up: open the driver, run the HCI init sequence
 * (plus LE init when the controller supports it) unless the device is
 * raw, then announce HCI_DEV_UP.
 * Returns 0 on success or -ENODEV/-ERFKILL/-EALREADY/-EIO/init error.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): a failure from hci_init_req is overwritten
		 * here when the controller is LE capable - confirm intended */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report power state while still in the setup phase */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
584
/* Bring an HCI device down: stop the tasklets, flush caches and
 * queues, issue a final HCI reset (non-raw devices), close the driver
 * and notify mgmt. Safe to call on an already-down device. Returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
651
652int hci_dev_close(__u16 dev)
653{
654 struct hci_dev *hdev;
655 int err;
656
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200657 hdev = hci_dev_get(dev);
658 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659 return -ENODEV;
660 err = hci_dev_do_close(hdev);
661 hci_dev_put(hdev);
662 return err;
663}
664
/* HCIDEVRESET ioctl helper: drop all pending traffic, flush caches and
 * connections, and re-issue an HCI reset without taking the device
 * down. No-op (returns 0) when the device is not up. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
705
706int hci_dev_reset_stat(__u16 dev)
707{
708 struct hci_dev *hdev;
709 int ret = 0;
710
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200711 hdev = hci_dev_get(dev);
712 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 return -ENODEV;
714
715 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
716
717 hci_dev_put(hdev);
718
719 return ret;
720}
721
/* Handler for the HCISET* ioctls: apply a single device setting,
 * either by issuing the matching HCI command (auth, encrypt, scan,
 * link policy) or by updating hdev fields directly. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed __u16s: [0] pkt count, [1] mtu */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
796
/* HCIGETDEVLIST ioctl helper: copy the (id, flags) pairs of up to
 * dev_num registered devices back to user space. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the allocation below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Listing a device counts as using it: cancel auto-off */
		hci_del_off_timer(hdev);

		/* Without the mgmt interface the device stays pairable */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
846
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * requested device id and copy it back to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device counts as using it: cancel auto-off */
	hci_del_off_timer(hdev);

	/* Without the mgmt interface the device stays pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type into the low nibble, device type into the next */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
887
888/* ---- Interface to HCI drivers ---- */
889
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200890static int hci_rfkill_set_block(void *data, bool blocked)
891{
892 struct hci_dev *hdev = data;
893
894 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
895
896 if (!blocked)
897 return 0;
898
899 hci_dev_do_close(hdev);
900
901 return 0;
902}
903
/* rfkill operations: only the block transition is acted upon. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
907
/* Alloc HCI device.
 * Returns a zeroed struct hci_dev with the driver_init queue set up,
 * or NULL on allocation failure. Release with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
922
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-init frames still queued */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
932
/* Workqueue handler: bring the adapter up after registration.
 * Runs from hdev->workqueue (queued in hci_register_dev). */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If nothing claims the adapter, power it back off after
	 * AUTO_OFF_TIMEOUT ms (see hci_auto_off) */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce the new
	 * index to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
949
/* Workqueue handler: power the adapter down (queued from the auto-off
 * timer; runs in process context where hci_dev_close may sleep). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
958
/* off_timer expiry handler. Runs in timer (softirq) context, so the
 * actual close is deferred to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	/* The grace period is over; further queries no longer keep the
	 * adapter powered */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
969
/* Cancel the pending auto-power-off and clear HCI_AUTO_OFF, keeping
 * the adapter up (called when userspace starts using the device). */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
977
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200978int hci_uuids_clear(struct hci_dev *hdev)
979{
980 struct list_head *p, *n;
981
982 list_for_each_safe(p, n, &hdev->uuids) {
983 struct bt_uuid *uuid;
984
985 uuid = list_entry(p, struct bt_uuid, list);
986
987 list_del(p);
988 kfree(uuid);
989 }
990
991 return 0;
992}
993
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200994int hci_link_keys_clear(struct hci_dev *hdev)
995{
996 struct list_head *p, *n;
997
998 list_for_each_safe(p, n, &hdev->link_keys) {
999 struct link_key *key;
1000
1001 key = list_entry(p, struct link_key, list);
1002
1003 list_del(p);
1004 kfree(key);
1005 }
1006
1007 return 0;
1008}
1009
1010struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1011{
1012 struct list_head *p;
1013
1014 list_for_each(p, &hdev->link_keys) {
1015 struct link_key *k;
1016
1017 k = list_entry(p, struct link_key, list);
1018
1019 if (bacmp(bdaddr, &k->bdaddr) == 0)
1020 return k;
1021 }
1022
1023 return NULL;
1024}
1025
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001026static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1027 u8 key_type, u8 old_key_type)
1028{
1029 /* Legacy key */
1030 if (key_type < 0x03)
1031 return 1;
1032
1033 /* Debug keys are insecure so don't store them persistently */
1034 if (key_type == HCI_LK_DEBUG_COMBINATION)
1035 return 0;
1036
1037 /* Changed combination key and there's no previous one */
1038 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1039 return 0;
1040
1041 /* Security mode 3 case */
1042 if (!conn)
1043 return 1;
1044
1045 /* Neither local nor remote side had no-bonding as requirement */
1046 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1047 return 1;
1048
1049 /* Local side had dedicated bonding as requirement */
1050 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1051 return 1;
1052
1053 /* Remote side had dedicated bonding as requirement */
1054 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1055 return 1;
1056
1057 /* If none of the above criteria match, then don't store the key
1058 * persistently */
1059 return 0;
1060}
1061
/* Store (or update) a link key for bdaddr.
 *
 * @conn:    the connection the key belongs to, may be NULL
 *           (e.g. security mode 3 key notifications)
 * @new_key: non-zero when the controller reported a brand new key
 *           (triggers a mgmt event and the persistence decision)
 * @val:     16-byte key value
 * @type:    HCI link key type as reported by the controller
 *
 * Returns 0 on success or -ENOMEM. Non-persistent keys are reported to
 * mgmt but immediately dropped from the list again.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff == "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type so the stored
	 * type still reflects how the bond was created */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are only reported, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1116
1117int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1118{
1119 struct link_key *key;
1120
1121 key = hci_find_link_key(hdev, bdaddr);
1122 if (!key)
1123 return -ENOENT;
1124
1125 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1126
1127 list_del(&key->list);
1128 kfree(key);
1129
1130 return 0;
1131}
1132
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered: pretend a credit came back so
	 * the command queue can make progress again */
	atomic_set(&hdev->cmd_cnt, 1);
	/* A reset command may be the one that timed out; clear the flag
	 * so a new reset can be issued */
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1143
Szymon Janc2763eda2011-03-22 13:12:22 +01001144struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1145 bdaddr_t *bdaddr)
1146{
1147 struct oob_data *data;
1148
1149 list_for_each_entry(data, &hdev->remote_oob_data, list)
1150 if (bacmp(bdaddr, &data->bdaddr) == 0)
1151 return data;
1152
1153 return NULL;
1154}
1155
1156int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1157{
1158 struct oob_data *data;
1159
1160 data = hci_find_remote_oob_data(hdev, bdaddr);
1161 if (!data)
1162 return -ENOENT;
1163
1164 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1165
1166 list_del(&data->list);
1167 kfree(data);
1168
1169 return 0;
1170}
1171
1172int hci_remote_oob_data_clear(struct hci_dev *hdev)
1173{
1174 struct oob_data *data, *n;
1175
1176 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1177 list_del(&data->list);
1178 kfree(data);
1179 }
1180
1181 return 0;
1182}
1183
1184int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1185 u8 *randomizer)
1186{
1187 struct oob_data *data;
1188
1189 data = hci_find_remote_oob_data(hdev, bdaddr);
1190
1191 if (!data) {
1192 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1193 if (!data)
1194 return -ENOMEM;
1195
1196 bacpy(&data->bdaddr, bdaddr);
1197 list_add(&data->list, &hdev->remote_oob_data);
1198 }
1199
1200 memcpy(data->hash, hash, sizeof(data->hash));
1201 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1202
1203 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1204
1205 return 0;
1206}
1207
/* adv_timer expiry handler: flush the LE advertising cache.
 * Runs in timer context; hci_dev_lock (spinlock variant) protects the
 * adv_entries list against concurrent readers. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1218
Andre Guedes76c86862011-05-26 16:23:50 -03001219int hci_adv_entries_clear(struct hci_dev *hdev)
1220{
1221 struct adv_entry *entry, *tmp;
1222
1223 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1224 list_del(&entry->list);
1225 kfree(entry);
1226 }
1227
1228 BT_DBG("%s adv cache cleared", hdev->name);
1229
1230 return 0;
1231}
1232
1233struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234{
1235 struct adv_entry *entry;
1236
1237 list_for_each_entry(entry, &hdev->adv_entries, list)
1238 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1239 return entry;
1240
1241 return NULL;
1242}
1243
1244static inline int is_connectable_adv(u8 evt_type)
1245{
1246 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1247 return 1;
1248
1249 return 0;
1250}
1251
/* Add a connectable LE advertiser to the adv cache from an advertising
 * report event. Returns 0 (added or already cached), -EINVAL for
 * non-connectable report types, or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
			batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1279
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -03001280static struct crypto_blkcipher *alloc_cypher(void)
1281{
1282 if (enable_smp)
1283 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
1284
1285 return ERR_PTR(-ENOTSUPP);
1286}
1287
/* Register HCI device */
/* Assigns the lowest free hciN id, initializes all per-device state
 * (tasklets, queues, timers, lists), creates the workqueue and sysfs/
 * rfkill entries, and schedules the initial power-on.
 *
 * Returns the assigned id on success, -EINVAL when mandatory driver
 * callbacks are missing, or -ENOMEM when workqueue creation fails.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide open/close/destruct callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' to keep the list sorted by id */
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	/* Sniff intervals in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Command tx timeout watchdog (see hci_cmd_timer) */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	/* Cipher allocation failure is non-fatal: only SMP needs it */
	hdev->tfm = alloc_cypher();
	if (IS_ERR(hdev->tfm))
		BT_INFO("Failed to load transform for ecb(aes): %ld",
							PTR_ERR(hdev->tfm));

	hci_register_sysfs(hdev);

	/* rfkill registration is best-effort; the device works without */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on now; hci_power_on arms the auto-off timer and clears
	 * HCI_SETUP once the device is up */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1404
/* Unregister HCI device */
/* Tears down everything set up by hci_register_dev in reverse-ish
 * order: delist, close, free reassembly buffers, notify mgmt, release
 * crypto/rfkill/sysfs, stop timers, destroy the workqueue, flush all
 * per-device caches and drop the final reference. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a no-op, so unused slots are fine */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced as
	 * added (i.e. setup completed; see hci_power_on) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	/* tfm may hold ERR_PTR(-ENOTSUPP) when SMP is disabled */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Stop timers before destroying the workqueue they feed */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1455
/* Suspend HCI device */
/* Only broadcasts HCI_DEV_SUSPEND via hci_notify(); the driver is
 * responsible for the actual power handling. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1463
/* Resume HCI device */
/* Counterpart of hci_suspend_dev: broadcasts HCI_DEV_RESUME only.
 * Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1471
/* Receive frame from HCI drivers */
/* Takes ownership of skb (skb->dev must point at the hci_dev). Frames
 * are only accepted while the device is up or initializing; otherwise
 * the skb is freed and -ENXIO returned. Processing happens later in
 * the rx tasklet. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1495
/* Incrementally reassemble one HCI packet of the given type from a
 * (possibly partial) byte stream.
 *
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Returns the number of unconsumed bytes (>= 0) — the caller feeds
 * them back in for the next packet — or a negative error. A completed
 * packet is handed to hci_recv_frame() and the slot is reset.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* NOTE: relies on HCI_ACLDATA_PKT..HCI_EVENT_PKT being a
	 * contiguous range of valid packet types */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Starting a new packet: allocate a buffer sized for the
		 * largest possible packet of this type and expect the
		 * fixed-size header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * complete the current header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length out of it and sanity-check against tailroom */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1604
Marcel Holtmannef222012007-07-11 06:42:04 +02001605int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1606{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301607 int rem = 0;
1608
Marcel Holtmannef222012007-07-11 06:42:04 +02001609 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1610 return -EILSEQ;
1611
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001612 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001613 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301614 if (rem < 0)
1615 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001616
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301617 data += (count - rem);
1618 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001619 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001620
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301621 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001622}
1623EXPORT_SYMBOL(hci_recv_fragment);
1624
/* Reassembly slot reserved for byte-stream (e.g. UART H4) transports
 * where the packet type indicator is in-band. */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (packet-type indicator byte followed by the
 * packet itself) into reassembly. Returns the final unconsumed-byte
 * count or a negative error from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the packet
			 * type indicator */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Mid-packet: continue with the type recorded
			 * when reassembly started */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1659
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660/* ---- Interface to upper protocols ---- */
1661
1662/* Register/Unregister protocols.
1663 * hci_task_lock is used to ensure that no tasks are running. */
1664int hci_register_proto(struct hci_proto *hp)
1665{
1666 int err = 0;
1667
1668 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1669
1670 if (hp->id >= HCI_MAX_PROTO)
1671 return -EINVAL;
1672
1673 write_lock_bh(&hci_task_lock);
1674
1675 if (!hci_proto[hp->id])
1676 hci_proto[hp->id] = hp;
1677 else
1678 err = -EEXIST;
1679
1680 write_unlock_bh(&hci_task_lock);
1681
1682 return err;
1683}
1684EXPORT_SYMBOL(hci_register_proto);
1685
1686int hci_unregister_proto(struct hci_proto *hp)
1687{
1688 int err = 0;
1689
1690 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1691
1692 if (hp->id >= HCI_MAX_PROTO)
1693 return -EINVAL;
1694
1695 write_lock_bh(&hci_task_lock);
1696
1697 if (hci_proto[hp->id])
1698 hci_proto[hp->id] = NULL;
1699 else
1700 err = -ENOENT;
1701
1702 write_unlock_bh(&hci_task_lock);
1703
1704 return err;
1705}
1706EXPORT_SYMBOL(hci_unregister_proto);
1707
/* Add a callback structure to the global hci_cb_list. Always
 * returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1719
/* Remove a callback structure from the global hci_cb_list. Always
 * returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1731
/* Hand one frame to the driver's send callback. Takes ownership of
 * skb (skb->dev identifies the hci_dev); a copy goes to monitoring
 * sockets when the device is in promiscuous mode. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1755
/* Send HCI command */
/* Build an HCI command packet (header + optional parameters) and queue
 * it on cmd_q for the command tasklet. @param must point at @plen
 * bytes. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command sent during initialization so the
	 * init sequence can be resumed/verified */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791
/* Get data from the previously sent command */
/* Returns a pointer to the parameter bytes of the last sent command,
 * or NULL when nothing was sent or the opcode does not match. The
 * pointer aliases hdev->sent_cmd and is only valid while that skb
 * lives. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* hdr->opcode is stored little-endian on the wire */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1809
/* Send ACL data */
/* Prepend an ACL header (handle+flags, payload length) to skb; the
 * pre-push skb->len becomes the header's dlen field. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* handle and packet-boundary/broadcast flags share 16 bits */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1822
/* Queue an ACL data skb (plus any frag_list fragments) on the
 * connection's data queue and kick the tx tasklet. The first buffer
 * keeps the caller's flags; continuation fragments are re-flagged
 * ACL_CONT. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragments; each is queued as its own skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1871
/* Send SCO data */
/* Prepend a SCO header (handle + 8-bit length) and queue the skb on
 * the connection's data queue for the tx tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1894
1895/* ---- HCI TX task (outgoing data) ---- */
1896
1897/* HCI Connection scheduler */
1898static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1899{
1900 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001901 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902 int num = 0, min = ~0;
1903 struct list_head *p;
1904
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001905 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 * added and removed with TX task disabled. */
1907 list_for_each(p, &h->list) {
1908 struct hci_conn *c;
1909 c = list_entry(p, struct hci_conn, list);
1910
Marcel Holtmann769be972008-07-14 20:13:49 +02001911 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02001913
1914 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1915 continue;
1916
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 num++;
1918
1919 if (c->sent < min) {
1920 min = c->sent;
1921 conn = c;
1922 }
1923 }
1924
1925 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001926 int cnt, q;
1927
1928 switch (conn->type) {
1929 case ACL_LINK:
1930 cnt = hdev->acl_cnt;
1931 break;
1932 case SCO_LINK:
1933 case ESCO_LINK:
1934 cnt = hdev->sco_cnt;
1935 break;
1936 case LE_LINK:
1937 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1938 break;
1939 default:
1940 cnt = 0;
1941 BT_ERR("Unknown link type");
1942 }
1943
1944 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 *quote = q ? q : 1;
1946 } else
1947 *quote = 0;
1948
1949 BT_DBG("conn %p quote %d", conn, *quote);
1950 return conn;
1951}
1952
Ville Tervobae1f5d2011-02-10 22:38:53 -03001953static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
1955 struct hci_conn_hash *h = &hdev->conn_hash;
1956 struct list_head *p;
1957 struct hci_conn *c;
1958
Ville Tervobae1f5d2011-02-10 22:38:53 -03001959 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 /* Kill stalled connections */
1962 list_for_each(p, &h->list) {
1963 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03001964 if (c->type == type && c->sent) {
1965 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 hdev->name, batostr(&c->dst));
1967 hci_acl_disconn(c, 0x13);
1968 }
1969 }
1970}
1971
1972static inline void hci_sched_acl(struct hci_dev *hdev)
1973{
1974 struct hci_conn *conn;
1975 struct sk_buff *skb;
1976 int quote;
1977
1978 BT_DBG("%s", hdev->name);
1979
1980 if (!test_bit(HCI_RAW, &hdev->flags)) {
1981 /* ACL tx timeout must be longer than maximum
1982 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur82453022008-02-17 23:25:57 -08001983 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03001984 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 }
1986
1987 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1988 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1989 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02001990
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -07001991 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02001992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 hci_send_frame(skb);
1994 hdev->acl_last_tx = jiffies;
1995
1996 hdev->acl_cnt--;
1997 conn->sent++;
1998 }
1999 }
2000}
2001
2002/* Schedule SCO */
2003static inline void hci_sched_sco(struct hci_dev *hdev)
2004{
2005 struct hci_conn *conn;
2006 struct sk_buff *skb;
2007 int quote;
2008
2009 BT_DBG("%s", hdev->name);
2010
2011 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2012 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2013 BT_DBG("skb %p len %d", skb, skb->len);
2014 hci_send_frame(skb);
2015
2016 conn->sent++;
2017 if (conn->sent == ~0)
2018 conn->sent = 0;
2019 }
2020 }
2021}
2022
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002023static inline void hci_sched_esco(struct hci_dev *hdev)
2024{
2025 struct hci_conn *conn;
2026 struct sk_buff *skb;
2027 int quote;
2028
2029 BT_DBG("%s", hdev->name);
2030
2031 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2032 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2033 BT_DBG("skb %p len %d", skb, skb->len);
2034 hci_send_frame(skb);
2035
2036 conn->sent++;
2037 if (conn->sent == ~0)
2038 conn->sent = 0;
2039 }
2040 }
2041}
2042
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002043static inline void hci_sched_le(struct hci_dev *hdev)
2044{
2045 struct hci_conn *conn;
2046 struct sk_buff *skb;
2047 int quote, cnt;
2048
2049 BT_DBG("%s", hdev->name);
2050
2051 if (!test_bit(HCI_RAW, &hdev->flags)) {
2052 /* LE tx timeout must be longer than maximum
2053 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03002054 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002055 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002056 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002057 }
2058
2059 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2060 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2061 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2062 BT_DBG("skb %p len %d", skb, skb->len);
2063
2064 hci_send_frame(skb);
2065 hdev->le_last_tx = jiffies;
2066
2067 cnt--;
2068 conn->sent++;
2069 }
2070 }
2071 if (hdev->le_pkts)
2072 hdev->le_cnt = cnt;
2073 else
2074 hdev->acl_cnt = cnt;
2075}
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077static void hci_tx_task(unsigned long arg)
2078{
2079 struct hci_dev *hdev = (struct hci_dev *) arg;
2080 struct sk_buff *skb;
2081
2082 read_lock(&hci_task_lock);
2083
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002084 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2085 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086
2087 /* Schedule queues and send stuff to HCI driver */
2088
2089 hci_sched_acl(hdev);
2090
2091 hci_sched_sco(hdev);
2092
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002093 hci_sched_esco(hdev);
2094
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002095 hci_sched_le(hdev);
2096
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 /* Send next queued raw (unknown type) packet */
2098 while ((skb = skb_dequeue(&hdev->raw_q)))
2099 hci_send_frame(skb);
2100
2101 read_unlock(&hci_task_lock);
2102}
2103
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002104/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105
2106/* ACL data packet */
2107static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2108{
2109 struct hci_acl_hdr *hdr = (void *) skb->data;
2110 struct hci_conn *conn;
2111 __u16 handle, flags;
2112
2113 skb_pull(skb, HCI_ACL_HDR_SIZE);
2114
2115 handle = __le16_to_cpu(hdr->handle);
2116 flags = hci_flags(handle);
2117 handle = hci_handle(handle);
2118
2119 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2120
2121 hdev->stat.acl_rx++;
2122
2123 hci_dev_lock(hdev);
2124 conn = hci_conn_hash_lookup_handle(hdev, handle);
2125 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002126
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 if (conn) {
2128 register struct hci_proto *hp;
2129
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -07002130 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002131
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002133 hp = hci_proto[HCI_PROTO_L2CAP];
2134 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 hp->recv_acldata(conn, skb, flags);
2136 return;
2137 }
2138 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002139 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 hdev->name, handle);
2141 }
2142
2143 kfree_skb(skb);
2144}
2145
2146/* SCO data packet */
2147static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2148{
2149 struct hci_sco_hdr *hdr = (void *) skb->data;
2150 struct hci_conn *conn;
2151 __u16 handle;
2152
2153 skb_pull(skb, HCI_SCO_HDR_SIZE);
2154
2155 handle = __le16_to_cpu(hdr->handle);
2156
2157 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2158
2159 hdev->stat.sco_rx++;
2160
2161 hci_dev_lock(hdev);
2162 conn = hci_conn_hash_lookup_handle(hdev, handle);
2163 hci_dev_unlock(hdev);
2164
2165 if (conn) {
2166 register struct hci_proto *hp;
2167
2168 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002169 hp = hci_proto[HCI_PROTO_SCO];
2170 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 hp->recv_scodata(conn, skb);
2172 return;
2173 }
2174 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002175 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 hdev->name, handle);
2177 }
2178
2179 kfree_skb(skb);
2180}
2181
Marcel Holtmann65164552005-10-28 19:20:48 +02002182static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183{
2184 struct hci_dev *hdev = (struct hci_dev *) arg;
2185 struct sk_buff *skb;
2186
2187 BT_DBG("%s", hdev->name);
2188
2189 read_lock(&hci_task_lock);
2190
2191 while ((skb = skb_dequeue(&hdev->rx_q))) {
2192 if (atomic_read(&hdev->promisc)) {
2193 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002194 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 }
2196
2197 if (test_bit(HCI_RAW, &hdev->flags)) {
2198 kfree_skb(skb);
2199 continue;
2200 }
2201
2202 if (test_bit(HCI_INIT, &hdev->flags)) {
2203 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002204 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 case HCI_ACLDATA_PKT:
2206 case HCI_SCODATA_PKT:
2207 kfree_skb(skb);
2208 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002209 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 }
2211
2212 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002213 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 case HCI_EVENT_PKT:
2215 hci_event_packet(hdev, skb);
2216 break;
2217
2218 case HCI_ACLDATA_PKT:
2219 BT_DBG("%s ACL data packet", hdev->name);
2220 hci_acldata_packet(hdev, skb);
2221 break;
2222
2223 case HCI_SCODATA_PKT:
2224 BT_DBG("%s SCO data packet", hdev->name);
2225 hci_scodata_packet(hdev, skb);
2226 break;
2227
2228 default:
2229 kfree_skb(skb);
2230 break;
2231 }
2232 }
2233
2234 read_unlock(&hci_task_lock);
2235}
2236
2237static void hci_cmd_task(unsigned long arg)
2238{
2239 struct hci_dev *hdev = (struct hci_dev *) arg;
2240 struct sk_buff *skb;
2241
2242 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2243
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002245 if (atomic_read(&hdev->cmd_cnt)) {
2246 skb = skb_dequeue(&hdev->cmd_q);
2247 if (!skb)
2248 return;
2249
Wei Yongjun7585b972009-02-25 18:29:52 +08002250 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002252 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2253 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254 atomic_dec(&hdev->cmd_cnt);
2255 hci_send_frame(skb);
Ville Tervo6bd32322011-02-16 16:32:41 +02002256 mod_timer(&hdev->cmd_timer,
2257 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 } else {
2259 skb_queue_head(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002260 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 }
2262 }
2263}
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -03002264
/* Runtime-writable (0644) module parameter toggling SMP (Security
 * Manager Protocol) support for LE links.
 * NOTE(review): enable_smp itself is declared elsewhere in this file —
 * not visible in this chunk. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");