blob: 702d5651c656d21d4e2bcd86c4d01df55499e0ab [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/sock.h>
46
47#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056static void hci_cmd_task(unsigned long arg);
57static void hci_rx_task(unsigned long arg);
58static void hci_tx_task(unsigned long arg);
59static void hci_notify(struct hci_dev *hdev, int event);
60
61static DEFINE_RWLOCK(hci_task_lock);
62
63/* HCI device list */
64LIST_HEAD(hci_dev_list);
65DEFINE_RWLOCK(hci_dev_list_lock);
66
67/* HCI callback list */
68LIST_HEAD(hci_cb_list);
69DEFINE_RWLOCK(hci_cb_list_lock);
70
71/* HCI protocols */
72#define HCI_MAX_PROTO 2
73struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080076static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
78/* ---- HCI notifications ---- */
79
80int hci_register_notifier(struct notifier_block *nb)
81{
Alan Sterne041c682006-03-27 01:16:30 -080082 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083}
84
85int hci_unregister_notifier(struct notifier_block *nb)
86{
Alan Sterne041c682006-03-27 01:16:30 -080087 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070088}
89
Marcel Holtmann65164552005-10-28 19:20:48 +020090static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070091{
Alan Sterne041c682006-03-27 01:16:30 -080092 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070093}
94
95/* ---- HCI requests ---- */
96
/* Complete a pending synchronous request.  Called from the event path
 * when a command finishes; @cmd is the completed opcode and @result the
 * HCI status.  Wakes up the waiter in __hci_request(). */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
113
/* Abort a pending synchronous request with error @err (positive errno,
 * negated by __hci_request()) and wake the waiter. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
124
/* Execute request and wait for completion.  Caller must hold the request
 * lock (see hci_request()).  @req queues the HCI commands; completion or
 * cancellation is signalled through hci_req_complete()/hci_req_cancel().
 * Returns 0 on success, a negative errno, -EINTR on signal, or
 * -ETIMEDOUT if nothing completed within @timeout jiffies. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return with req_status still set
	 * (not reset to 0 as on the paths below) — confirm a late
	 * completion cannot act on the stale state. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor cancelled within @timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170{
171 int ret;
172
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
184static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185{
186 BT_DBG("%s %ld", hdev->name, opt);
187
188 /* Reset device */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190}
191
/* Request handler run during device bring-up: first flush any
 * driver-provided init commands into the command queue, then send the
 * mandatory and optional HCI initialization sequence.  Completion of
 * the sequence is tracked via hci_req_complete()/init_last_cmd. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot handle it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys for all remote addresses */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
266
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300267static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
268{
269 BT_DBG("%s", hdev->name);
270
271 /* Read LE buffer size */
272 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
273}
274
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
276{
277 __u8 scan = opt;
278
279 BT_DBG("%s %x", hdev->name, scan);
280
281 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200282 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283}
284
285static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
286{
287 __u8 auth = opt;
288
289 BT_DBG("%s %x", hdev->name, auth);
290
291 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200292 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293}
294
295static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 encrypt = opt;
298
299 BT_DBG("%s %x", hdev->name, encrypt);
300
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200301 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200305static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __le16 policy = cpu_to_le16(opt);
308
Marcel Holtmanna418b892008-11-30 12:17:28 +0100309 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200310
311 /* Default link policy */
312 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
313}
314
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900315/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316 * Device is held on return. */
317struct hci_dev *hci_dev_get(int index)
318{
319 struct hci_dev *hdev = NULL;
320 struct list_head *p;
321
322 BT_DBG("%d", index);
323
324 if (index < 0)
325 return NULL;
326
327 read_lock(&hci_dev_list_lock);
328 list_for_each(p, &hci_dev_list) {
329 struct hci_dev *d = list_entry(p, struct hci_dev, list);
330 if (d->id == index) {
331 hdev = hci_dev_hold(d);
332 break;
333 }
334 }
335 read_unlock(&hci_dev_list_lock);
336 return hdev;
337}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338
339/* ---- Inquiry support ---- */
340static void inquiry_cache_flush(struct hci_dev *hdev)
341{
342 struct inquiry_cache *cache = &hdev->inq_cache;
343 struct inquiry_entry *next = cache->list, *e;
344
345 BT_DBG("cache %p", cache);
346
347 cache->list = NULL;
348 while ((e = next)) {
349 next = e->next;
350 kfree(e);
351 }
352}
353
354struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
355{
356 struct inquiry_cache *cache = &hdev->inq_cache;
357 struct inquiry_entry *e;
358
359 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
360
361 for (e = cache->list; e; e = e->next)
362 if (!bacmp(&e->data.bdaddr, bdaddr))
363 break;
364 return e;
365}
366
367void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
368{
369 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200370 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371
372 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
373
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200374 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
375 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200377 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
378 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700379 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200380
381 ie->next = cache->list;
382 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383 }
384
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200385 memcpy(&ie->data, data, sizeof(*data));
386 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387 cache->timestamp = jiffies;
388}
389
/* Copy up to @num cached entries into @buf as an array of
 * struct inquiry_info records.  Callers wrap this in
 * hci_dev_lock_bh()/hci_dev_unlock_bh().  Returns the number of
 * records written. */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
411
412static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
413{
414 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
415 struct hci_cp_inquiry cp;
416
417 BT_DBG("%s", hdev->name);
418
419 if (test_bit(HCI_INQUIRY, &hdev->flags))
420 return;
421
422 /* Start Inquiry */
423 memcpy(&cp.lap, &ir->lap, 3);
424 cp.length = ir->length;
425 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200426 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427}
428
/* HCIINQUIRY ioctl: run an inquiry (or reuse a fresh cache) and copy
 * the resulting inquiry_info records back to user space after the
 * updated hci_inquiry_req header.
 * Returns 0 or a negative errno (-EFAULT, -ENODEV, -ENOMEM, or an
 * error propagated from hci_request()). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only go on the air if the cache is stale or empty, or the
	 * caller explicitly requested a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28s units per the HCI spec; scale to jiffies */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated header first, then the records */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
494
495/* ---- HCI ioctl helpers ---- */
496
/* Bring device @dev up: open the transport, run the HCI init sequence
 * (unless the device is raw), and mark the device HCI_UP.  On init
 * failure everything is torn down again.  Returns 0 or a negative
 * errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a radio-blocked device */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE controllers need their buffer sizes read as well */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP the mgmt layer announces power later */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
580
/* Take the device down: cancel pending requests, stop the tasklets,
 * flush caches and connections, reset the controller, drain all queues
 * and close the transport.  The ordering here is deliberate — tasks
 * are killed before their queues are purged.  Returns 0 (also when
 * the device was already down). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command; stop its retransmit/timeout timer first */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
646
647int hci_dev_close(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int err;
651
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200652 hdev = hci_dev_get(dev);
653 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 return -ENODEV;
655 err = hci_dev_do_close(hdev);
656 hci_dev_put(hdev);
657 return err;
658}
659
/* HCIDEVRESET ioctl: flush queues, caches and connections and send an
 * HCI reset without taking the interface down.  The TX tasklet is
 * disabled for the duration so nothing is transmitted mid-reset.
 * Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	/* Nothing to do on a device that is down */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control counters to their post-reset state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
700
701int hci_dev_reset_stat(__u16 dev)
702{
703 struct hci_dev *hdev;
704 int ret = 0;
705
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200706 hdev = hci_dev_get(dev);
707 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708 return -ENODEV;
709
710 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
711
712 hci_dev_put(hdev);
713
714 return ret;
715}
716
/* HCISET* ioctls: apply one device setting from userspace.  Settings
 * that require talking to the controller (auth, encrypt, scan, link
 * policy) go through hci_request(); the rest only update hdev fields.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two __u16 halves: MTU and packet count.
		 * NOTE(review): the +1/+0 split reads raw halves of the
		 * 32-bit value — confirm the expected half order matches
		 * userspace on all endiannesses. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
791
/* HCIGETDEVLIST ioctl: copy (id, flags) pairs for up to dev_num
 * registered devices back to userspace.  The requested count is
 * bounded to keep the temporary allocation below two pages.
 * Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Userspace is looking at the device: cancel auto power-off */
		hci_del_off_timer(hdev);

		/* Devices not under mgmt control default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
841
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it back to userspace.  Returns 0 or -EFAULT/-ENODEV. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is looking at the device: cancel auto power-off */
	hci_del_off_timer(hdev);

	/* Devices not under mgmt control default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type above it */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
882
883/* ---- Interface to HCI drivers ---- */
884
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200885static int hci_rfkill_set_block(void *data, bool blocked)
886{
887 struct hci_dev *hdev = data;
888
889 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
890
891 if (!blocked)
892 return 0;
893
894 hci_dev_do_close(hdev);
895
896 return 0;
897}
898
/* rfkill hooks for HCI controllers; only blocking is acted upon. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
902
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903/* Alloc HCI device */
904struct hci_dev *hci_alloc_dev(void)
905{
906 struct hci_dev *hdev;
907
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200908 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 if (!hdev)
910 return NULL;
911
Linus Torvalds1da177e2005-04-16 15:20:36 -0700912 skb_queue_head_init(&hdev->driver_init);
913
914 return hdev;
915}
916EXPORT_SYMBOL(hci_alloc_dev);
917
/* Free HCI device allocated with hci_alloc_dev(). */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
927
/* Deferred power-on work: bring the device up; if nothing claims it,
 * arm the automatic power-off timer. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Device was powered on automatically: schedule shutdown unless
	 * userspace takes over within AUTO_OFF_TIMEOUT. */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-up ends the setup phase; announce the
	 * controller index to the management interface. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
944
/* Deferred power-off work, queued from the auto-off timer. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
953
/* Auto-off timer callback. Runs in timer (atomic) context, so the
 * actual close -- which may sleep -- is deferred to the workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
964
/* Cancel a pending automatic power-off, e.g. when userspace claims the
 * device before the timeout fires. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
972
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200973int hci_uuids_clear(struct hci_dev *hdev)
974{
975 struct list_head *p, *n;
976
977 list_for_each_safe(p, n, &hdev->uuids) {
978 struct bt_uuid *uuid;
979
980 uuid = list_entry(p, struct bt_uuid, list);
981
982 list_del(p);
983 kfree(uuid);
984 }
985
986 return 0;
987}
988
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200989int hci_link_keys_clear(struct hci_dev *hdev)
990{
991 struct list_head *p, *n;
992
993 list_for_each_safe(p, n, &hdev->link_keys) {
994 struct link_key *key;
995
996 key = list_entry(p, struct link_key, list);
997
998 list_del(p);
999 kfree(key);
1000 }
1001
1002 return 0;
1003}
1004
1005struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1006{
1007 struct list_head *p;
1008
1009 list_for_each(p, &hdev->link_keys) {
1010 struct link_key *k;
1011
1012 k = list_entry(p, struct link_key, list);
1013
1014 if (bacmp(bdaddr, &k->bdaddr) == 0)
1015 return k;
1016 }
1017
1018 return NULL;
1019}
1020
/* Store (or refresh) the link key for @bdaddr. @new_key is non-zero
 * when the key comes from a fresh pairing rather than from persistent
 * storage. Returns 0 on success or -ENOMEM. Caller must hold the
 * device lock. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for mgmt_new_key() */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* NOTE(review): 0x06 appears to be the "changed combination key"
	 * HCI key type; the previous type is preserved so the original
	 * pairing classification is not lost -- confirm against the HCI
	 * Link Key Notification event definition. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1054
1055int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1056{
1057 struct link_key *key;
1058
1059 key = hci_find_link_key(hdev, bdaddr);
1060 if (!key)
1061 return -ENOENT;
1062
1063 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1064
1065 list_del(&key->list);
1066 kfree(key);
1067
1068 return 0;
1069}
1070
/* HCI command timer function: fires when the controller fails to
 * answer a command in time. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Assume the outstanding credit is lost and restart command
	 * processing so the cmd queue does not stall forever. */
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1080
/* Register HCI device. Assigns the first free hciX id, initializes all
 * per-device state, creates the workqueue/sysfs/rfkill hooks and kicks
 * off the deferred power-on. Returns the assigned id or a negative
 * errno. The driver must have filled in open/close/destruct. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap (or the end) is the slot to insert at. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Conservative defaults until the controller reports its real
	 * capabilities during init */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Tasklets for the three processing paths: command tx, rx, data tx */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on asynchronously; auto-off until someone claims the
	 * device, SETUP cleared after the first successful bring-up */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1186
/* Unregister HCI device. Reverses hci_register_dev(): removes the
 * device from the global list, shuts it down and tears down all
 * associated state. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced as added
	 * (i.e. setup/init completed at least once) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Cancel pending auto power-off before killing the workqueue */
	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1231
/* Suspend HCI device: just notify listeners; the driver performs the
 * actual suspend work. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1239
/* Resume HCI device: just notify listeners; the driver performs the
 * actual resume work. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1247
/* Receive frame from HCI drivers. Consumes the skb; returns 0 on
 * success or -ENXIO when the device is not up or initializing. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Frames are only accepted while the device is up or still
	 * running its init sequence */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1271
/* Incrementally reassemble one HCI packet of the given @type from a
 * fragment buffer, using per-device slot @index to keep state across
 * calls. Returns the number of input bytes NOT yet consumed (0 when
 * everything was used), or a negative error. A completed frame is
 * handed to hci_recv_frame(). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
					index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame in progress: allocate a buffer sized for the
		 * largest packet of this type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		/* Track how many bytes are still expected in skb->cb */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the packet header is complete, read the payload
		 * length from it to learn how much more to expect */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				/* Advertised payload cannot fit the buffer:
				 * drop the frame and reset the slot */
				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1380
Marcel Holtmannef222012007-07-11 06:42:04 +02001381int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1382{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301383 int rem = 0;
1384
Marcel Holtmannef222012007-07-11 06:42:04 +02001385 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1386 return -EILSEQ;
1387
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001388 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301389 rem = hci_reassembly(hdev, type, data, count,
1390 type - 1, GFP_ATOMIC);
1391 if (rem < 0)
1392 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001393
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301394 data += (count - rem);
1395 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001396 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001397
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301398 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001399}
1400EXPORT_SYMBOL(hci_recv_fragment);
1401
Suraj Sumangala99811512010-07-14 13:02:19 +05301402#define STREAM_REASSEMBLY 0
1403
1404int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1405{
1406 int type;
1407 int rem = 0;
1408
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001409 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301410 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1411
1412 if (!skb) {
1413 struct { char type; } *pkt;
1414
1415 /* Start of the frame */
1416 pkt = data;
1417 type = pkt->type;
1418
1419 data++;
1420 count--;
1421 } else
1422 type = bt_cb(skb)->pkt_type;
1423
1424 rem = hci_reassembly(hdev, type, data,
1425 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1426 if (rem < 0)
1427 return rem;
1428
1429 data += (count - rem);
1430 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001431 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301432
1433 return rem;
1434}
1435EXPORT_SYMBOL(hci_recv_stream_fragment);
1436
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437/* ---- Interface to upper protocols ---- */
1438
1439/* Register/Unregister protocols.
1440 * hci_task_lock is used to ensure that no tasks are running. */
1441int hci_register_proto(struct hci_proto *hp)
1442{
1443 int err = 0;
1444
1445 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1446
1447 if (hp->id >= HCI_MAX_PROTO)
1448 return -EINVAL;
1449
1450 write_lock_bh(&hci_task_lock);
1451
1452 if (!hci_proto[hp->id])
1453 hci_proto[hp->id] = hp;
1454 else
1455 err = -EEXIST;
1456
1457 write_unlock_bh(&hci_task_lock);
1458
1459 return err;
1460}
1461EXPORT_SYMBOL(hci_register_proto);
1462
1463int hci_unregister_proto(struct hci_proto *hp)
1464{
1465 int err = 0;
1466
1467 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1468
1469 if (hp->id >= HCI_MAX_PROTO)
1470 return -EINVAL;
1471
1472 write_lock_bh(&hci_task_lock);
1473
1474 if (hci_proto[hp->id])
1475 hci_proto[hp->id] = NULL;
1476 else
1477 err = -ENOENT;
1478
1479 write_unlock_bh(&hci_task_lock);
1480
1481 return err;
1482}
1483EXPORT_SYMBOL(hci_unregister_proto);
1484
/* Add a callback to the global HCI callback list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1496
/* Remove a callback from the global HCI callback list. Always
 * returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1508
/* Hand one outgoing frame to the driver's send callback, mirroring it
 * to listening sockets when someone is in promiscuous mode. Consumes
 * the skb; returns the driver's result or -ENODEV. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1532
/* Send HCI command: build a command packet from @opcode and @plen bytes
 * of @param, queue it on the cmd queue and kick the cmd tasklet.
 * Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during the init sequence so
	 * completion handling can resume it */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001568
/* Get data from the previously sent command: returns a pointer to the
 * parameter payload of the last sent command, or NULL when no command
 * was sent or its opcode does not match. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Skip past the command header to the parameters */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1586
/* Send ACL data */
/* Prepend an ACL header carrying @handle and the packet-boundary/
 * broadcast @flags (packed into one 16-bit field) to the skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1599
/* Queue ACL data for @conn and kick the tx tasklet. A fragmented skb
 * (frag_list) is flattened into individually headed packets: the head
 * keeps the caller's flags, the fragments are marked ACL_CONT. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1648
/* Send SCO data */
/* Prepend the SCO header, queue the skb on the connection's data queue
 * and kick the tx tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1671
1672/* ---- HCI TX task (outgoing data) ---- */
1673
1674/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest packets
 * in flight, and compute a fair per-round packet quota from the
 * controller's free buffer credits. Returns NULL (quota 0) when nothing
 * is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL credits */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the available credits evenly across the eligible
		 * connections, but always grant at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1729
/* Transmit-timeout handler: disconnect every connection of @type that
 * still has unacknowledged packets, since the controller has stopped
 * returning credits for them. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
1748
/* Drain queued ACL data to the driver, bounded by per-connection quotas
 * and the controller's free ACL buffer credits. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1778
/* Schedule SCO */
/* Drain queued SCO data to the driver, bounded by per-connection
 * quotas and the controller's free SCO buffer credits. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1799
/* Drain queued eSCO data to the driver; eSCO links share the SCO
 * buffer credit counter (sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* wrap the in-flight counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1819
/* Drain queued LE data to the driver. Controllers without a dedicated
 * LE buffer pool (le_pkts == 0) borrow ACL credits instead. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write back the remaining credits to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1853
/* TX tasklet: run each per-link-type scheduler, then flush any raw
 * (unknown type) packets straight to the driver. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1880
/* ----- HCI RX task (incoming data processing) ----- */
1882
/* ACL data packet */
/* Strip the ACL header, look up the owning connection and hand the
 * payload to the L2CAP layer; drops the skb when no handler takes it. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1922
1923/* SCO data packet */
1924static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1925{
1926 struct hci_sco_hdr *hdr = (void *) skb->data;
1927 struct hci_conn *conn;
1928 __u16 handle;
1929
1930 skb_pull(skb, HCI_SCO_HDR_SIZE);
1931
1932 handle = __le16_to_cpu(hdr->handle);
1933
1934 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1935
1936 hdev->stat.sco_rx++;
1937
1938 hci_dev_lock(hdev);
1939 conn = hci_conn_hash_lookup_handle(hdev, handle);
1940 hci_dev_unlock(hdev);
1941
1942 if (conn) {
1943 register struct hci_proto *hp;
1944
1945 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001946 hp = hci_proto[HCI_PROTO_SCO];
1947 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948 hp->recv_scodata(conn, skb);
1949 return;
1950 }
1951 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001952 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 hdev->name, handle);
1954 }
1955
1956 kfree_skb(skb);
1957}
1958
/* RX tasklet: drain hdev->rx_q and dispatch each frame by packet type.
 * The read side of hci_task_lock keeps protocol modules from
 * unregistering while frames are being delivered.
 *
 * Per-frame order matters: promiscuous sockets get a copy first, then
 * RAW devices drop everything (userspace handles it), then data packets
 * are filtered out while the device is still initializing. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets (monitoring listeners) */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			/* Raw device: the stack does not process frames */
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events must still get through so init
			 * commands can complete. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame: each handler consumes the skb */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2013
/* CMD tasklet: send the next queued HCI command if the controller has
 * command credit (cmd_cnt) available. A clone of the sent command is
 * kept in hdev->sent_cmd so event processing can match the reply. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command; it is no longer
		 * needed once a new one is going out. kfree_skb(NULL)
		 * is a no-op, so no guard is required. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one credit and push the frame, then arm
			 * the command timeout to catch a stuck controller. */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Atomic clone failed: put the command back at the
			 * head of the queue and reschedule to retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}