blob: a80bc1cdb35b6aa568215e5245aaff87bfc2799b [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/sock.h>
46
47#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
/* Tasklet handlers, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Serializes the rx/tx/cmd tasklets against protocol (de)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (e.g. L2CAP, SCO) registered via hci_register_proto) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* ---- HCI notifications ---- */
78
/* Add a notifier to the HCI event notifier chain.
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove a notifier from the HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast a device event (HCI_DEV_UP, HCI_DEV_DOWN, ...) to all
 * registered notifiers, passing hdev as the notifier data. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
/* Complete the currently pending synchronous request.
 *
 * @cmd:    opcode of the HCI command that just completed
 * @result: HCI status of the command
 *
 * Stores the result and wakes the waiter sleeping in __hci_request().
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
112
113static void hci_req_cancel(struct hci_dev *hdev, int err)
114{
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122}
123
124/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @req:     callback that queues the HCI command(s)
 * @opt:     opaque argument passed through to @req
 * @timeout: maximum wait, in jiffies
 *
 * Returns 0 on success, a negative errno derived from the HCI result,
 * -EINTR if interrupted by a signal, or -ETIMEDOUT.
 * Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and change task state BEFORE issuing
	 * the request, so a completion arriving immediately is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the HCI status byte to a negative errno. */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
166
167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100168 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169{
170 int ret;
171
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181}
182
/* Request callback: issue an unconditional HCI_Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
191
/* Request callback: queue the controller initialization sequence.
 * Runs with HCI_INIT set, so completions are matched against
 * hdev->init_last_cmd by hci_req_complete(). Command order matters;
 * do not reorder without checking the event handlers. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: driver-provided vendor setup packets queued
	 * on driver_init are sent before any standard command. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset, unless the transport forbids it (HCI_QUIRK_NO_RESET) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Flush all link keys stored on the controller; the host keeps
	 * its own key database. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
268
/* Request callback: LE-specific initialization, run after hci_init_req()
 * on controllers that advertise LE support (see hci_dev_open()). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
276
/* Request callback: set inquiry/page scan mode. @opt carries the raw
 * scan-enable byte for HCI_Write_Scan_Enable. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
286
/* Request callback: enable/disable authentication. @opt carries the raw
 * byte for HCI_Write_Authentication_Enable. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
296
/* Request callback: enable/disable link-level encryption. @opt carries
 * the raw byte for HCI_Write_Encryption_Mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
306
/* Request callback: set the default link policy. @opt carries the
 * policy bits, converted to little endian for the wire. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
316
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900317/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 * Device is held on return. */
319struct hci_dev *hci_dev_get(int index)
320{
321 struct hci_dev *hdev = NULL;
322 struct list_head *p;
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
330 list_for_each(p, &hci_dev_list) {
331 struct hci_dev *d = list_entry(p, struct hci_dev, list);
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341/* ---- Inquiry support ---- */
342static void inquiry_cache_flush(struct hci_dev *hdev)
343{
344 struct inquiry_cache *cache = &hdev->inq_cache;
345 struct inquiry_entry *next = cache->list, *e;
346
347 BT_DBG("cache %p", cache);
348
349 cache->list = NULL;
350 while ((e = next)) {
351 next = e->next;
352 kfree(e);
353 }
354}
355
356struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
357{
358 struct inquiry_cache *cache = &hdev->inq_cache;
359 struct inquiry_entry *e;
360
361 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
362
363 for (e = cache->list; e; e = e->next)
364 if (!bacmp(&e->data.bdaddr, bdaddr))
365 break;
366 return e;
367}
368
/* Insert or refresh an inquiry result in the cache.
 * Existing entries (matched by bdaddr) are overwritten in place; new
 * entries are prepended. Allocation failure is silently ignored
 * (best-effort cache). Caller must hold the device lock. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
391
392static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
393{
394 struct inquiry_cache *cache = &hdev->inq_cache;
395 struct inquiry_info *info = (struct inquiry_info *) buf;
396 struct inquiry_entry *e;
397 int copied = 0;
398
399 for (e = cache->list; e && copied < num; e = e->next, copied++) {
400 struct inquiry_data *data = &e->data;
401 bacpy(&info->bdaddr, &data->bdaddr);
402 info->pscan_rep_mode = data->pscan_rep_mode;
403 info->pscan_period_mode = data->pscan_period_mode;
404 info->pscan_mode = data->pscan_mode;
405 memcpy(info->dev_class, data->dev_class, 3);
406 info->clock_offset = data->clock_offset;
407 info++;
408 }
409
410 BT_DBG("cache %p, copied %d", cache, copied);
411 return copied;
412}
413
/* Request callback: start an inquiry using the parameters packed into
 * @opt (a struct hci_inquiry_req *). Does nothing if an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
430
/* HCIINQUIRY ioctl handler: run (or reuse a recent) inquiry and copy
 * the results back to user space.
 *
 * @arg: user pointer to a struct hci_inquiry_req, followed by space
 *       for the returned inquiry_info array.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only re-run the inquiry if the cache is stale/empty or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in units of ~2 seconds; wait that long overall. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
496
497/* ---- HCI ioctl helpers ---- */
498
/* Bring an HCI device up: open the transport, run the init request
 * sequence (unless the device is raw), and mark it HCI_UP.
 *
 * @dev: device index
 *
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY, -EIO, or an
 * error from the init requests on failure (with full cleanup).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a device that is rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport via the driver callback. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable controllers get an extra init pass. */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on while mgmt setup is still pending. */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
581
/* Tear an HCI device down: cancel pending requests, stop tasklets,
 * flush caches/queues, reset the controller (unless raw), and close
 * the transport. Returns 0 (also when the device was already down).
 * The teardown ordering below is deliberate; do not reorder. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Stop timer, it might be running */
	del_timer_sync(&hdev->cmd_timer);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device was opened. */
	hci_dev_put(hdev);
	return 0;
}
649
/* Look up device @dev by index and shut it down.
 * Returns 0 on success or -ENODEV if the index is unknown. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
662
/* HCIDEVRESET ioctl handler: flush queues, caches and connections,
 * then issue an HCI_Reset (unless the device is raw).
 * Returns 0 on success (including when the device is down), or a
 * negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet from running while we purge its queues. */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their post-reset state. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
703
704int hci_dev_reset_stat(__u16 dev)
705{
706 struct hci_dev *hdev;
707 int ret = 0;
708
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200709 hdev = hci_dev_get(dev);
710 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 return -ENODEV;
712
713 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
714
715 hci_dev_put(hdev);
716
717 return ret;
718}
719
/* Dispatch the HCISET* family of ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, ...)
 * @arg: user pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values:
	 * low half = packet count, high half = MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
794
/* HCIGETDEVLIST ioctl handler: return the ids and flags of up to
 * dev_num registered devices.
 *
 * @arg: user pointer to a struct hci_dev_list_req (dev_num filled in
 *       by the caller; dev_req array written back).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Sanity-bound the request so the kernel buffer stays small. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Listing a device counts as activity: cancel auto-off. */
		hci_del_off_timer(hdev);

		/* Devices not managed through mgmt default to pairable. */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
844
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for the
 * device whose id the caller passed in.
 *
 * @arg: user pointer to a struct hci_dev_info (dev_id filled in by
 *       the caller; remaining fields written back).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: cancel auto-off. */
	hci_del_off_timer(hdev);

	/* Devices not managed through mgmt default to pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
885
886/* ---- Interface to HCI drivers ---- */
887
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200888static int hci_rfkill_set_block(void *data, bool blocked)
889{
890 struct hci_dev *hdev = data;
891
892 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
893
894 if (!blocked)
895 return 0;
896
897 hci_dev_do_close(hdev);
898
899 return 0;
900}
901
/* rfkill operations registered for each HCI device. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
905
/* Alloc HCI device */
/* Allocate a zeroed struct hci_dev and initialize its driver_init
 * queue. Returns NULL on allocation failure. Free with
 * hci_free_dev() after registration teardown. */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
920
/* Free HCI device.
 * Drops any queued driver-init commands and releases the last device
 * reference; the actual kfree happens in the device release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
930
/* Workqueue handler: power on a newly registered controller.
 * Opens the device; if the auto-off policy is still armed, (re)starts
 * the timer that powers it back down after AUTO_OFF_TIMEOUT. The first
 * successful power-on after registration announces the index to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* test_and_clear: announce to mgmt exactly once, on first setup */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
947
/* Workqueue handler: power the controller down (counterpart of
 * hci_power_on). Runs in process context so hci_dev_close may sleep. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
956
/* off_timer callback: auto-off timeout expired without userspace
 * claiming the device. Timer callbacks run in atomic context, so the
 * actual close is deferred to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
967
/* Cancel the pending auto-off: clear the policy flag and delete the
 * timer. Called when userspace takes over the device or it goes away. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
975
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200976int hci_uuids_clear(struct hci_dev *hdev)
977{
978 struct list_head *p, *n;
979
980 list_for_each_safe(p, n, &hdev->uuids) {
981 struct bt_uuid *uuid;
982
983 uuid = list_entry(p, struct bt_uuid, list);
984
985 list_del(p);
986 kfree(uuid);
987 }
988
989 return 0;
990}
991
Johan Hedberg55ed8ca12011-01-17 14:41:05 +0200992int hci_link_keys_clear(struct hci_dev *hdev)
993{
994 struct list_head *p, *n;
995
996 list_for_each_safe(p, n, &hdev->link_keys) {
997 struct link_key *key;
998
999 key = list_entry(p, struct link_key, list);
1000
1001 list_del(p);
1002 kfree(key);
1003 }
1004
1005 return 0;
1006}
1007
1008struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1009{
1010 struct list_head *p;
1011
1012 list_for_each(p, &hdev->link_keys) {
1013 struct link_key *k;
1014
1015 k = list_entry(p, struct link_key, list);
1016
1017 if (bacmp(bdaddr, &k->bdaddr) == 0)
1018 return k;
1019 }
1020
1021 return NULL;
1022}
1023
/* Store (or refresh) the link key for @bdaddr.
 * @new_key: non-zero when this came from a Link Key Notification, in
 * which case mgmt is told about it. Reuses an existing entry when one
 * exists for the address, otherwise allocates a fresh one (GFP_ATOMIC:
 * may be called from the event-processing path).
 * Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff = no previous key; lets mgmt distinguish add vs update */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* 0x06 appears to be the "changed combination key" type from the
	 * HCI Link Key Notification; keep the original type so the key's
	 * real nature isn't lost — NOTE(review): confirm against spec. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1057
1058int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1059{
1060 struct link_key *key;
1061
1062 key = hci_find_link_key(hdev, bdaddr);
1063 if (!key)
1064 return -ENOENT;
1065
1066 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1067
1068 list_del(&key->list);
1069 kfree(key);
1070
1071 return 0;
1072}
1073
/* HCI command timer function.
 * Fires when the controller failed to answer a command in time.
 * Force the command credit back to 1 and clear HCI_RESET so the
 * command tasklet can make progress with the next queued command. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1084
Szymon Janc2763eda2011-03-22 13:12:22 +01001085struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1086 bdaddr_t *bdaddr)
1087{
1088 struct oob_data *data;
1089
1090 list_for_each_entry(data, &hdev->remote_oob_data, list)
1091 if (bacmp(bdaddr, &data->bdaddr) == 0)
1092 return data;
1093
1094 return NULL;
1095}
1096
1097int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1098{
1099 struct oob_data *data;
1100
1101 data = hci_find_remote_oob_data(hdev, bdaddr);
1102 if (!data)
1103 return -ENOENT;
1104
1105 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1106
1107 list_del(&data->list);
1108 kfree(data);
1109
1110 return 0;
1111}
1112
/* Free every cached remote OOB data entry on @hdev. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1124
/* Cache remote OOB pairing data (hash + randomizer) for @bdaddr,
 * overwriting any previous entry for the same address. GFP_ATOMIC:
 * may run from the event path. Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1148
/* Register HCI device.
 * Picks the first free hciN id, initializes all per-device state
 * (tasklets, queues, timers, lists), registers sysfs and rfkill, and
 * schedules the initial power-on. Returns the assigned id, -EINVAL if
 * mandatory driver callbacks are missing, or -ENOMEM. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap (or the end) is the id to use, and 'head' ends
	 * up at the node to insert after */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Defaults until the controller reports its real capabilities */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill failure is non-fatal: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on now, auto-off later unless userspace claims the device */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1256
/* Unregister HCI device.
 * Reverses hci_register_dev: unlink from the global list, close the
 * device, free reassembly buffers, tear down mgmt/rfkill/sysfs/timers/
 * workqueue, purge per-device lists, and drop the final reference.
 * Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only devices mgmt was told about need an index_removed event */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Purge per-device state under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1302
/* Suspend HCI device.
 * Only broadcasts HCI_DEV_SUSPEND to notifiees; no device state is
 * changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1310
/* Resume HCI device.
 * Only broadcasts HCI_DEV_RESUME to notifiees; no device state is
 * changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1318
/* Receive frame from HCI drivers.
 * Takes ownership of @skb (skb->dev must point at the hci_dev).
 * Frames are dropped with -ENXIO unless the device is up or still
 * initializing; otherwise they are timestamped, queued on rx_q and
 * the RX tasklet is kicked. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1342
/* Incrementally reassemble one HCI packet of @type from driver data.
 * @index selects the per-type slot in hdev->reassembly[] holding the
 * partially built skb. Consumes up to @count bytes from @data; a
 * completed packet is handed to hci_recv_frame(). Returns the number
 * of input bytes NOT consumed (>= 0), -EILSEQ for a bad type/index,
 * or -ENOMEM on allocation failure or an over-long payload. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst case of
		 * this packet type; expect only the header for now */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * stage (header first, then payload) */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; reject payloads larger than the allocated skb */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1451
Marcel Holtmannef222012007-07-11 06:42:04 +02001452int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1453{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301454 int rem = 0;
1455
Marcel Holtmannef222012007-07-11 06:42:04 +02001456 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1457 return -EILSEQ;
1458
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001459 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001460 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301461 if (rem < 0)
1462 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001463
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301464 data += (count - rem);
1465 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001466 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001467
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301468 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001469}
1470EXPORT_SYMBOL(hci_recv_fragment);
1471
Suraj Sumangala99811512010-07-14 13:02:19 +05301472#define STREAM_REASSEMBLY 0
1473
1474int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1475{
1476 int type;
1477 int rem = 0;
1478
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001479 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301480 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1481
1482 if (!skb) {
1483 struct { char type; } *pkt;
1484
1485 /* Start of the frame */
1486 pkt = data;
1487 type = pkt->type;
1488
1489 data++;
1490 count--;
1491 } else
1492 type = bt_cb(skb)->pkt_type;
1493
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001494 rem = hci_reassembly(hdev, type, data, count,
1495 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301496 if (rem < 0)
1497 return rem;
1498
1499 data += (count - rem);
1500 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001501 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301502
1503 return rem;
1504}
1505EXPORT_SYMBOL(hci_recv_stream_fragment);
1506
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507/* ---- Interface to upper protocols ---- */
1508
1509/* Register/Unregister protocols.
1510 * hci_task_lock is used to ensure that no tasks are running. */
1511int hci_register_proto(struct hci_proto *hp)
1512{
1513 int err = 0;
1514
1515 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1516
1517 if (hp->id >= HCI_MAX_PROTO)
1518 return -EINVAL;
1519
1520 write_lock_bh(&hci_task_lock);
1521
1522 if (!hci_proto[hp->id])
1523 hci_proto[hp->id] = hp;
1524 else
1525 err = -EEXIST;
1526
1527 write_unlock_bh(&hci_task_lock);
1528
1529 return err;
1530}
1531EXPORT_SYMBOL(hci_register_proto);
1532
1533int hci_unregister_proto(struct hci_proto *hp)
1534{
1535 int err = 0;
1536
1537 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1538
1539 if (hp->id >= HCI_MAX_PROTO)
1540 return -EINVAL;
1541
1542 write_lock_bh(&hci_task_lock);
1543
1544 if (hci_proto[hp->id])
1545 hci_proto[hp->id] = NULL;
1546 else
1547 err = -ENOENT;
1548
1549 write_unlock_bh(&hci_task_lock);
1550
1551 return err;
1552}
1553EXPORT_SYMBOL(hci_unregister_proto);
1554
/* Add @cb to the global HCI callback list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1566
/* Remove @cb from the global HCI callback list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1578
/* Hand one fully built frame to the driver's send callback.
 * Takes ownership of @skb (skb->dev identifies the hci_dev). In
 * promiscuous mode a copy is first delivered to monitoring sockets.
 * Returns the driver's send result, or -ENODEV without a device. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1602
/* Send HCI command.
 * Builds a command packet (header + @plen bytes of @param), queues it
 * on cmd_q and kicks the command tasklet, which applies controller
 * flow control. During init the opcode is recorded so the init state
 * machine can match replies. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
/* Get data from the previously sent command.
 * Returns a pointer to the parameter bytes of the last sent command if
 * its opcode matches @opcode, otherwise NULL. The pointer aliases
 * hdev->sent_cmd's buffer — do not use it after that skb is released. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1656
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* handle and packet-boundary/broadcast flags share one 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1669
/* Queue an ACL data packet for @conn and kick the TX tasklet.
 * A fragmented skb (frag_list) is unrolled: the head keeps the caller's
 * flags (ACL_START), each continuation gets ACL_CONT, and all pieces
 * are queued atomically so fragments of different packets never
 * interleave on the connection queue. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1718
/* Send SCO data */
/* Prepend the SCO header (handle, 8-bit length), queue the packet on
 * @conn's data queue and kick the TX tasklet. Takes ownership of @skb. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1741
1742/* ---- HCI TX task (outgoing data) ---- */
1743
1744/* HCI Connection scheduler */
/* Connection scheduler: pick the active @type connection with pending
 * data that has the fewest in-flight packets (simple fairness), and
 * compute its quota as the controller's free buffer count divided by
 * the number of competing connections (at least 1).
 * Returns the chosen connection, or NULL with *quote = 0. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only fully established (or configuring) links may send */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Each link type draws from its own buffer pool; LE falls
		 * back to the ACL pool when the controller reports no
		 * dedicated LE buffers (le_mtu == 0) */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1799
/* TX timeout on links of @type: the controller stopped returning
 * buffer credits. Disconnect every @type connection that still has
 * unacknowledged packets to recover the stalled buffers. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections; 0x13 is the HCI "Remote User
	 * Terminated Connection" reason code */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
1818
/* Drain queued ACL data, round-robin across connections via
 * hci_low_sent(), while the controller still has free ACL buffers
 * (acl_cnt). Also detects a stalled controller: no credits returned
 * for 45 seconds triggers hci_link_tx_to(). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1848
1849/* Schedule SCO */
1850static inline void hci_sched_sco(struct hci_dev *hdev)
1851{
1852 struct hci_conn *conn;
1853 struct sk_buff *skb;
1854 int quote;
1855
1856 BT_DBG("%s", hdev->name);
1857
1858 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1859 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1860 BT_DBG("skb %p len %d", skb, skb->len);
1861 hci_send_frame(skb);
1862
1863 conn->sent++;
1864 if (conn->sent == ~0)
1865 conn->sent = 0;
1866 }
1867 }
1868}
1869
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001870static inline void hci_sched_esco(struct hci_dev *hdev)
1871{
1872 struct hci_conn *conn;
1873 struct sk_buff *skb;
1874 int quote;
1875
1876 BT_DBG("%s", hdev->name);
1877
1878 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1879 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1880 BT_DBG("skb %p len %d", skb, skb->len);
1881 hci_send_frame(skb);
1882
1883 conn->sent++;
1884 if (conn->sent == ~0)
1885 conn->sent = 0;
1886 }
1887 }
1888}
1889
/* Drain queued LE data. Controllers without a dedicated LE buffer
 * pool (le_pkts == 0) share the ACL pool, so the working count is
 * written back to le_cnt or acl_cnt accordingly. Stall detection
 * mirrors hci_sched_acl(). */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool when present, otherwise borrow ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1923
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924static void hci_tx_task(unsigned long arg)
1925{
1926 struct hci_dev *hdev = (struct hci_dev *) arg;
1927 struct sk_buff *skb;
1928
1929 read_lock(&hci_task_lock);
1930
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001931 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1932 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
1934 /* Schedule queues and send stuff to HCI driver */
1935
1936 hci_sched_acl(hdev);
1937
1938 hci_sched_sco(hdev);
1939
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001940 hci_sched_esco(hdev);
1941
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001942 hci_sched_le(hdev);
1943
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 /* Send next queued raw (unknown type) packet */
1945 while ((skb = skb_dequeue(&hdev->raw_q)))
1946 hci_send_frame(skb);
1947
1948 read_unlock(&hci_task_lock);
1949}
1950
1951/* ----- HCI RX task (incoming data proccessing) ----- */
1952
1953/* ACL data packet */
1954static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1955{
1956 struct hci_acl_hdr *hdr = (void *) skb->data;
1957 struct hci_conn *conn;
1958 __u16 handle, flags;
1959
1960 skb_pull(skb, HCI_ACL_HDR_SIZE);
1961
1962 handle = __le16_to_cpu(hdr->handle);
1963 flags = hci_flags(handle);
1964 handle = hci_handle(handle);
1965
1966 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1967
1968 hdev->stat.acl_rx++;
1969
1970 hci_dev_lock(hdev);
1971 conn = hci_conn_hash_lookup_handle(hdev, handle);
1972 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001973
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 if (conn) {
1975 register struct hci_proto *hp;
1976
Marcel Holtmann04837f62006-07-03 10:02:33 +02001977 hci_conn_enter_active_mode(conn);
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001980 hp = hci_proto[HCI_PROTO_L2CAP];
1981 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 hp->recv_acldata(conn, skb, flags);
1983 return;
1984 }
1985 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001986 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 hdev->name, handle);
1988 }
1989
1990 kfree_skb(skb);
1991}
1992
1993/* SCO data packet */
1994static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1995{
1996 struct hci_sco_hdr *hdr = (void *) skb->data;
1997 struct hci_conn *conn;
1998 __u16 handle;
1999
2000 skb_pull(skb, HCI_SCO_HDR_SIZE);
2001
2002 handle = __le16_to_cpu(hdr->handle);
2003
2004 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2005
2006 hdev->stat.sco_rx++;
2007
2008 hci_dev_lock(hdev);
2009 conn = hci_conn_hash_lookup_handle(hdev, handle);
2010 hci_dev_unlock(hdev);
2011
2012 if (conn) {
2013 register struct hci_proto *hp;
2014
2015 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002016 hp = hci_proto[HCI_PROTO_SCO];
2017 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 hp->recv_scodata(conn, skb);
2019 return;
2020 }
2021 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002022 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 hdev->name, handle);
2024 }
2025
2026 kfree_skb(skb);
2027}
2028
/* RX tasklet: drain the device's receive queue and dispatch each
 * packet by type, honoring the device's RAW/INIT state flags. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace owns the device, the stack does
		 * not process anything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			/* While initializing, only events are meaningful;
			 * data packets are dropped. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		/* Each handler takes ownership of skb. */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2083
/* Command tasklet: send the next queued HCI command when the
 * controller has a free command credit (cmd_cnt > 0). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command; its completion has
		 * already been processed (that is what restored cmd_cnt). */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the event handler can match the
		 * command-complete/status event against what was sent. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* Arm the watchdog that fires if the controller
			 * never answers this command. */
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back at the head
			 * of the queue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}