blob: 60260cae3a04edae4b5ff5a35a0e7cfa231718b7 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/sock.h>
46
47#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
/* Milliseconds before an auto-powered-on controller is switched back off
 * if nothing (e.g. the management interface) has claimed it. */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet handlers for command, RX and TX processing. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Lock taken by the HCI task handlers above; presumably guards them
 * against concurrent protocol (de)registration — confirm against users. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* ---- HCI notifications ---- */
78
/* Register a callback on the HCI notifier chain (device events). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove a callback previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast @event for @hdev to all registered HCI notifier callbacks. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
Johan Hedberg23bb5762010-12-21 23:01:27 +020096void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Johan Hedberg23bb5762010-12-21 23:01:27 +020098 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
Johan Hedberga5040ef2011-01-10 13:28:59 +0200100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200104 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
110 }
111}
112
113static void hci_req_cancel(struct hci_dev *hdev, int err)
114{
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122}
123
124/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900125static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100126 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127{
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
Johan Hedberga5040ef2011-01-10 13:28:59 +0200160 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165}
166
167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100168 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169{
170 int ret;
171
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181}
182
/* Request handler: send HCI_Reset, marking the reset as in progress first. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
191
192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
193{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200194 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800196 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200197 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198
199 BT_DBG("%s %ld", hdev->name, opt);
200
201 /* Driver initialization */
202
203 /* Special commands */
204 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700205 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100209 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210 }
211 skb_queue_purge(&hdev->driver_init);
212
213 /* Mandatory initialization */
214
215 /* Reset */
Gustavo F. Padovan10572132011-03-16 15:36:29 -0300216 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
217 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200218 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovan10572132011-03-16 15:36:29 -0300219 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
221 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200224 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200226
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200228 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
230#if 0
231 /* Host buffer size */
232 {
233 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700234 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700236 cp.acl_max_pkt = cpu_to_le16(0xffff);
237 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200238 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 }
240#endif
241
242 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
244
245 /* Read Class of Device */
246 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
247
248 /* Read Local Name */
249 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250
251 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200252 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
254 /* Optional initialization */
255
256 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200257 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200258 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700261 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200263
264 bacpy(&cp.bdaddr, BDADDR_ANY);
265 cp.delete_all = 1;
266 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267}
268
/* Request handler: LE-specific init, run after hci_init_req() on
 * LE-capable controllers (see hci_dev_open()). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
276
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
278{
279 __u8 scan = opt;
280
281 BT_DBG("%s %x", hdev->name, scan);
282
283 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200284 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285}
286
287static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
288{
289 __u8 auth = opt;
290
291 BT_DBG("%s %x", hdev->name, auth);
292
293 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200294 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295}
296
297static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
298{
299 __u8 encrypt = opt;
300
301 BT_DBG("%s %x", hdev->name, encrypt);
302
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200303 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200304 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305}
306
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200307static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
308{
309 __le16 policy = cpu_to_le16(opt);
310
Marcel Holtmanna418b892008-11-30 12:17:28 +0100311 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200312
313 /* Default link policy */
314 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
315}
316
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900317/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 * Device is held on return. */
319struct hci_dev *hci_dev_get(int index)
320{
321 struct hci_dev *hdev = NULL;
322 struct list_head *p;
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
330 list_for_each(p, &hci_dev_list) {
331 struct hci_dev *d = list_entry(p, struct hci_dev, list);
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341/* ---- Inquiry support ---- */
342static void inquiry_cache_flush(struct hci_dev *hdev)
343{
344 struct inquiry_cache *cache = &hdev->inq_cache;
345 struct inquiry_entry *next = cache->list, *e;
346
347 BT_DBG("cache %p", cache);
348
349 cache->list = NULL;
350 while ((e = next)) {
351 next = e->next;
352 kfree(e);
353 }
354}
355
356struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
357{
358 struct inquiry_cache *cache = &hdev->inq_cache;
359 struct inquiry_entry *e;
360
361 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
362
363 for (e = cache->list; e; e = e->next)
364 if (!bacmp(&e->data.bdaddr, bdaddr))
365 break;
366 return e;
367}
368
369void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
370{
371 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200372 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
375
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200376 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
377 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200379 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
380 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200382
383 ie->next = cache->list;
384 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 }
386
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200387 memcpy(&ie->data, data, sizeof(*data));
388 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 cache->timestamp = jiffies;
390}
391
392static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
393{
394 struct inquiry_cache *cache = &hdev->inq_cache;
395 struct inquiry_info *info = (struct inquiry_info *) buf;
396 struct inquiry_entry *e;
397 int copied = 0;
398
399 for (e = cache->list; e && copied < num; e = e->next, copied++) {
400 struct inquiry_data *data = &e->data;
401 bacpy(&info->bdaddr, &data->bdaddr);
402 info->pscan_rep_mode = data->pscan_rep_mode;
403 info->pscan_period_mode = data->pscan_period_mode;
404 info->pscan_mode = data->pscan_mode;
405 memcpy(info->dev_class, data->dev_class, 3);
406 info->clock_offset = data->clock_offset;
407 info++;
408 }
409
410 BT_DBG("cache %p, copied %d", cache, copied);
411 return copied;
412}
413
414static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
415{
416 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
417 struct hci_cp_inquiry cp;
418
419 BT_DBG("%s", hdev->name);
420
421 if (test_bit(HCI_INQUIRY, &hdev->flags))
422 return;
423
424 /* Start Inquiry */
425 memcpy(&cp.lap, &ir->lap, 3);
426 cp.length = ir->length;
427 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200428 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429}
430
/* HCIINQUIRY ioctl: optionally run a fresh inquiry (when the cache is
 * stale, empty or a flush is requested), then copy the cached results
 * back to user space.  Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether the cache must be refreshed */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in units of ~2 seconds */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* First write back the (updated) request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
496
497/* ---- HCI ioctl helpers ---- */
498
/* HCIDEVUP ioctl: power on and initialize the device with index @dev.
 * Runs the controller init sequence unless the device is raw; on init
 * failure the transport is closed again.  Returns 0 or a negative errno. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* Run the controller init sequence (and LE init if capable) */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
581
582static int hci_dev_do_close(struct hci_dev *hdev)
583{
584 BT_DBG("%s %p", hdev->name, hdev);
585
586 hci_req_cancel(hdev, ENODEV);
587 hci_req_lock(hdev);
588
Thomas Gleixner6f5ef992011-03-24 20:16:42 +0100589 /* Stop timer, it might be running */
590 del_timer_sync(&hdev->cmd_timer);
591
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300593 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
602 hci_dev_lock_bh(hdev);
603 inquiry_cache_flush(hdev);
604 hci_conn_hash_flush(hdev);
605 hci_dev_unlock_bh(hdev);
606
607 hci_notify(hdev, HCI_DEV_DOWN);
608
609 if (hdev->flush)
610 hdev->flush(hdev);
611
612 /* Reset device */
613 skb_queue_purge(&hdev->cmd_q);
614 atomic_set(&hdev->cmd_cnt, 1);
615 if (!test_bit(HCI_RAW, &hdev->flags)) {
616 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200617 __hci_request(hdev, hci_reset_req, 0,
618 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 clear_bit(HCI_INIT, &hdev->flags);
620 }
621
622 /* Kill cmd task */
623 tasklet_kill(&hdev->cmd_task);
624
625 /* Drop queues */
626 skb_queue_purge(&hdev->rx_q);
627 skb_queue_purge(&hdev->cmd_q);
628 skb_queue_purge(&hdev->raw_q);
629
630 /* Drop last sent command */
631 if (hdev->sent_cmd) {
632 kfree_skb(hdev->sent_cmd);
633 hdev->sent_cmd = NULL;
634 }
635
636 /* After this point our queues are empty
637 * and no tasks are scheduled. */
638 hdev->close(hdev);
639
Johan Hedberg5add6af2010-12-16 10:00:37 +0200640 mgmt_powered(hdev->id, 0);
641
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642 /* Clear flags */
643 hdev->flags = 0;
644
645 hci_req_unlock(hdev);
646
647 hci_dev_put(hdev);
648 return 0;
649}
650
651int hci_dev_close(__u16 dev)
652{
653 struct hci_dev *hdev;
654 int err;
655
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200656 hdev = hci_dev_get(dev);
657 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 return -ENODEV;
659 err = hci_dev_do_close(hdev);
660 hci_dev_put(hdev);
661 return err;
662}
663
664int hci_dev_reset(__u16 dev)
665{
666 struct hci_dev *hdev;
667 int ret = 0;
668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200669 hdev = hci_dev_get(dev);
670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return -ENODEV;
672
673 hci_req_lock(hdev);
674 tasklet_disable(&hdev->tx_task);
675
676 if (!test_bit(HCI_UP, &hdev->flags))
677 goto done;
678
679 /* Drop queues */
680 skb_queue_purge(&hdev->rx_q);
681 skb_queue_purge(&hdev->cmd_q);
682
683 hci_dev_lock_bh(hdev);
684 inquiry_cache_flush(hdev);
685 hci_conn_hash_flush(hdev);
686 hci_dev_unlock_bh(hdev);
687
688 if (hdev->flush)
689 hdev->flush(hdev);
690
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900691 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300692 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200695 ret = __hci_request(hdev, hci_reset_req, 0,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697
698done:
699 tasklet_enable(&hdev->tx_task);
700 hci_req_unlock(hdev);
701 hci_dev_put(hdev);
702 return ret;
703}
704
705int hci_dev_reset_stat(__u16 dev)
706{
707 struct hci_dev *hdev;
708 int ret = 0;
709
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200710 hdev = hci_dev_get(dev);
711 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 return -ENODEV;
713
714 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
715
716 hci_dev_put(hdev);
717
718 return ret;
719}
720
/* Handle the HCISET* device-control ioctls: copy the request from user
 * space, dispatch on the ioctl number, return 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* NOTE(review): dev_opt is reinterpreted as two __u16 halves
		 * (mtu then pkts); layout depends on host endianness — the
		 * established ABI of this ioctl. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
795
796int hci_get_dev_list(void __user *arg)
797{
798 struct hci_dev_list_req *dl;
799 struct hci_dev_req *dr;
800 struct list_head *p;
801 int n = 0, size, err;
802 __u16 dev_num;
803
804 if (get_user(dev_num, (__u16 __user *) arg))
805 return -EFAULT;
806
807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
808 return -EINVAL;
809
810 size = sizeof(*dl) + dev_num * sizeof(*dr);
811
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200812 dl = kzalloc(size, GFP_KERNEL);
813 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 return -ENOMEM;
815
816 dr = dl->dev_req;
817
818 read_lock_bh(&hci_dev_list_lock);
819 list_for_each(p, &hci_dev_list) {
820 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200821
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200823
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200824 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200825
826 if (!test_bit(HCI_MGMT, &hdev->flags))
827 set_bit(HCI_PAIRABLE, &hdev->flags);
828
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 (dr + n)->dev_id = hdev->id;
830 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 if (++n >= dev_num)
833 break;
834 }
835 read_unlock_bh(&hci_dev_list_lock);
836
837 dl->dev_num = n;
838 size = sizeof(*dl) + n * sizeof(*dr);
839
840 err = copy_to_user(arg, dl, size);
841 kfree(dl);
842
843 return err ? -EFAULT : 0;
844}
845
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the requested
 * device and copy it back to user space.  Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* User space touched the device: cancel any pending auto-off */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
886
887/* ---- Interface to HCI drivers ---- */
888
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200889static int hci_rfkill_set_block(void *data, bool blocked)
890{
891 struct hci_dev *hdev = data;
892
893 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
894
895 if (!blocked)
896 return 0;
897
898 hci_dev_do_close(hdev);
899
900 return 0;
901}
902
/* rfkill operations for HCI devices — only block/unblock is needed. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
906
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907/* Alloc HCI device */
908struct hci_dev *hci_alloc_dev(void)
909{
910 struct hci_dev *hdev;
911
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200912 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 if (!hdev)
914 return NULL;
915
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916 skb_queue_head_init(&hdev->driver_init);
917
918 return hdev;
919}
920EXPORT_SYMBOL(hci_alloc_dev);
921
922/* Free HCI device */
923void hci_free_dev(struct hci_dev *hdev)
924{
925 skb_queue_purge(&hdev->driver_init);
926
Marcel Holtmanna91f2e32006-07-03 10:02:41 +0200927 /* will free via device release */
928 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929}
930EXPORT_SYMBOL(hci_free_dev);
931
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200932static void hci_power_on(struct work_struct *work)
933{
934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
935
936 BT_DBG("%s", hdev->name);
937
938 if (hci_dev_open(hdev->id) < 0)
939 return;
940
941 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
942 mod_timer(&hdev->off_timer,
943 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
944
945 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
946 mgmt_index_added(hdev->id);
947}
948
/* Deferred power-off, run from the device workqueue */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
957
/* Auto power-off timer callback: the grace period expired, so hand the
 * actual shutdown to process context via the power_off work item */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
968
/* Cancel a pending auto power-off and disarm the flag */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
976
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200977int hci_uuids_clear(struct hci_dev *hdev)
978{
979 struct list_head *p, *n;
980
981 list_for_each_safe(p, n, &hdev->uuids) {
982 struct bt_uuid *uuid;
983
984 uuid = list_entry(p, struct bt_uuid, list);
985
986 list_del(p);
987 kfree(uuid);
988 }
989
990 return 0;
991}
992
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200993int hci_link_keys_clear(struct hci_dev *hdev)
994{
995 struct list_head *p, *n;
996
997 list_for_each_safe(p, n, &hdev->link_keys) {
998 struct link_key *key;
999
1000 key = list_entry(p, struct link_key, list);
1001
1002 list_del(p);
1003 kfree(key);
1004 }
1005
1006 return 0;
1007}
1008
1009struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1010{
1011 struct list_head *p;
1012
1013 list_for_each(p, &hdev->link_keys) {
1014 struct link_key *k;
1015
1016 k = list_entry(p, struct link_key, list);
1017
1018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1019 return k;
1020 }
1021
1022 return NULL;
1023}
1024
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001025static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1026 u8 key_type, u8 old_key_type)
1027{
1028 /* Legacy key */
1029 if (key_type < 0x03)
1030 return 1;
1031
1032 /* Debug keys are insecure so don't store them persistently */
1033 if (key_type == HCI_LK_DEBUG_COMBINATION)
1034 return 0;
1035
1036 /* Changed combination key and there's no previous one */
1037 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1038 return 0;
1039
1040 /* Security mode 3 case */
1041 if (!conn)
1042 return 1;
1043
1044 /* Neither local nor remote side had no-bonding as requirement */
1045 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1046 return 1;
1047
1048 /* Local side had dedicated bonding as requirement */
1049 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1050 return 1;
1051
1052 /* Remote side had dedicated bonding as requirement */
1053 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1054 return 1;
1055
1056 /* If none of the above criteria match, then don't store the key
1057 * persistently */
1058 return 0;
1059}
1060
/* Store (or update) the link key for bdaddr. @new_key is non-zero when
 * the key was just created by pairing (as opposed to being loaded from
 * storage). Returns 0 on success or -ENOMEM. May intentionally discard
 * a non-persistent key, which still counts as success. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse the existing entry for this address if one exists */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* No previous key: 0xff marks "no old key type" below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	/* Freshly generated keys that don't qualify for persistent
	 * storage are dropped again; the pairing itself succeeded */
	if (new_key && !hci_persistent_key(hdev, conn, type, old_key_type)) {
		list_del(&key->list);
		kfree(key);
		return 0;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	/* Notify the management interface about newly created keys */
	if (new_key)
		mgmt_new_key(hdev->id, key);

	return 0;
}
1112
1113int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1114{
1115 struct link_key *key;
1116
1117 key = hci_find_link_key(hdev, bdaddr);
1118 if (!key)
1119 return -ENOENT;
1120
1121 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1122
1123 list_del(&key->list);
1124 kfree(key);
1125
1126 return 0;
1127}
1128
/* HCI command timer function: fires when the controller failed to
 * acknowledge a command in time */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* Restore one command credit and kick the command task so the
	 * queue can make progress again despite the lost completion */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1139
Szymon Janc2763eda2011-03-22 13:12:22 +01001140struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1141 bdaddr_t *bdaddr)
1142{
1143 struct oob_data *data;
1144
1145 list_for_each_entry(data, &hdev->remote_oob_data, list)
1146 if (bacmp(bdaddr, &data->bdaddr) == 0)
1147 return data;
1148
1149 return NULL;
1150}
1151
1152int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1153{
1154 struct oob_data *data;
1155
1156 data = hci_find_remote_oob_data(hdev, bdaddr);
1157 if (!data)
1158 return -ENOENT;
1159
1160 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1161
1162 list_del(&data->list);
1163 kfree(data);
1164
1165 return 0;
1166}
1167
1168int hci_remote_oob_data_clear(struct hci_dev *hdev)
1169{
1170 struct oob_data *data, *n;
1171
1172 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1173 list_del(&data->list);
1174 kfree(data);
1175 }
1176
1177 return 0;
1178}
1179
1180int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1181 u8 *randomizer)
1182{
1183 struct oob_data *data;
1184
1185 data = hci_find_remote_oob_data(hdev, bdaddr);
1186
1187 if (!data) {
1188 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1189 if (!data)
1190 return -ENOMEM;
1191
1192 bacpy(&data->bdaddr, bdaddr);
1193 list_add(&data->list, &hdev->remote_oob_data);
1194 }
1195
1196 memcpy(data->hash, hash, sizeof(data->hash));
1197 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1198
1199 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1200
1201 return 0;
1202}
1203
/* Register HCI device.
 * Assigns the first free hciN id, initializes all per-device state,
 * creates the device workqueue and rfkill handle, and schedules the
 * initial power-on. Returns the assigned id or a negative errno. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Command, RX and TX processing run as tasklets */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never acknowledges */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* Deferred power handling plus the auto power-off timer */
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill registration is best-effort: the device works without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup mode and power up automatically */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above before failing */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1311
/* Unregister HCI device.
 * Removes the device from the global list, closes it, tears down
 * rfkill/sysfs/workqueue, clears all per-device storage and drops the
 * registration reference. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Cancel pending auto power-off before destroying the workqueue */
	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Drop all per-device storage under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1357
/* Suspend HCI device: broadcast the suspend event via hci_notify().
 * No device state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1365
/* Resume HCI device: broadcast the resume event via hci_notify().
 * No device state is changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1373
/* Receive frame from HCI drivers.
 * Tags and timestamps the frame, queues it on the RX queue and kicks
 * the RX tasklet. Frees the skb and returns -ENXIO when the device is
 * neither up nor initializing; returns 0 otherwise. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1397
/* Incrementally reassemble one HCI packet of @type from a raw byte
 * stream. Per-slot state lives in hdev->reassembly[index]. Returns the
 * number of input bytes NOT consumed (a complete frame may leave the
 * start of the next one unread), or a negative errno. On completion the
 * frame is handed to hci_recv_frame(). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Starting a new frame: size the buffer for the maximum
		 * payload of this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy at most the bytes still expected for this stage */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1506
Marcel Holtmannef222012007-07-11 06:42:04 +02001507int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1508{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301509 int rem = 0;
1510
Marcel Holtmannef222012007-07-11 06:42:04 +02001511 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1512 return -EILSEQ;
1513
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001514 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001515 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301516 if (rem < 0)
1517 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001518
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301519 data += (count - rem);
1520 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001521 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001522
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301523 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001524}
1525EXPORT_SYMBOL(hci_recv_fragment);
1526
Suraj Sumangala99811512010-07-14 13:02:19 +05301527#define STREAM_REASSEMBLY 0
1528
1529int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1530{
1531 int type;
1532 int rem = 0;
1533
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001534 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301535 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1536
1537 if (!skb) {
1538 struct { char type; } *pkt;
1539
1540 /* Start of the frame */
1541 pkt = data;
1542 type = pkt->type;
1543
1544 data++;
1545 count--;
1546 } else
1547 type = bt_cb(skb)->pkt_type;
1548
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001549 rem = hci_reassembly(hdev, type, data, count,
1550 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301551 if (rem < 0)
1552 return rem;
1553
1554 data += (count - rem);
1555 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001556 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301557
1558 return rem;
1559}
1560EXPORT_SYMBOL(hci_recv_stream_fragment);
1561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562/* ---- Interface to upper protocols ---- */
1563
1564/* Register/Unregister protocols.
1565 * hci_task_lock is used to ensure that no tasks are running. */
1566int hci_register_proto(struct hci_proto *hp)
1567{
1568 int err = 0;
1569
1570 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1571
1572 if (hp->id >= HCI_MAX_PROTO)
1573 return -EINVAL;
1574
1575 write_lock_bh(&hci_task_lock);
1576
1577 if (!hci_proto[hp->id])
1578 hci_proto[hp->id] = hp;
1579 else
1580 err = -EEXIST;
1581
1582 write_unlock_bh(&hci_task_lock);
1583
1584 return err;
1585}
1586EXPORT_SYMBOL(hci_register_proto);
1587
1588int hci_unregister_proto(struct hci_proto *hp)
1589{
1590 int err = 0;
1591
1592 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1593
1594 if (hp->id >= HCI_MAX_PROTO)
1595 return -EINVAL;
1596
1597 write_lock_bh(&hci_task_lock);
1598
1599 if (hci_proto[hp->id])
1600 hci_proto[hp->id] = NULL;
1601 else
1602 err = -ENOENT;
1603
1604 write_unlock_bh(&hci_task_lock);
1605
1606 return err;
1607}
1608EXPORT_SYMBOL(hci_unregister_proto);
1609
/* Add a callback structure to the global hci_cb list under the
 * bh-safe writer lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1621
/* Remove a callback structure from the global hci_cb list under the
 * bh-safe writer lock. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1633
1634static int hci_send_frame(struct sk_buff *skb)
1635{
1636 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1637
1638 if (!hdev) {
1639 kfree_skb(skb);
1640 return -ENODEV;
1641 }
1642
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001643 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
1645 if (atomic_read(&hdev->promisc)) {
1646 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001647 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001648
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001649 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 }
1651
1652 /* Get rid of skb owner, prior to sending to the driver. */
1653 skb_orphan(skb);
1654
1655 return hdev->send(skb);
1656}
1657
/* Send HCI command.
 * Builds a command packet (header + optional parameter block), queues
 * it on the command queue and kicks the command tasklet. Returns 0 on
 * success or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode plus parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued while the controller initializes */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693
1694/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001695void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696{
1697 struct hci_command_hdr *hdr;
1698
1699 if (!hdev->sent_cmd)
1700 return NULL;
1701
1702 hdr = (void *) hdev->sent_cmd->data;
1703
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001704 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 return NULL;
1706
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001707 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
1709 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1710}
1711
1712/* Send ACL data */
1713static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1714{
1715 struct hci_acl_hdr *hdr;
1716 int len = skb->len;
1717
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001718 skb_push(skb, HCI_ACL_HDR_SIZE);
1719 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001720 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001721 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1722 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723}
1724
/* Queue ACL data for transmission on a connection and kick the TX
 * tasklet. A frame with a frag_list is queued as a chain: the head
 * keeps the caller's flags, the continuations are re-flagged ACL_CONT,
 * and the whole chain is enqueued atomically. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* All following fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1773
1774/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03001775void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
1777 struct hci_dev *hdev = conn->hdev;
1778 struct hci_sco_hdr hdr;
1779
1780 BT_DBG("%s len %d", hdev->name, skb->len);
1781
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07001782 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 hdr.dlen = skb->len;
1784
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03001785 skb_push(skb, HCI_SCO_HDR_SIZE);
1786 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07001787 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788
1789 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001790 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001791
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 skb_queue_tail(&conn->data_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001793 tasklet_schedule(&hdev->tx_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794}
1795EXPORT_SYMBOL(hci_send_sco);
1796
1797/* ---- HCI TX task (outgoing data) ---- */
1798
1799/* HCI Connection scheduler */
1800static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1801{
1802 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001803 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 int num = 0, min = ~0;
1805 struct list_head *p;
1806
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001807 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 * added and removed with TX task disabled. */
1809 list_for_each(p, &h->list) {
1810 struct hci_conn *c;
1811 c = list_entry(p, struct hci_conn, list);
1812
Marcel Holtmann769be972008-07-14 20:13:49 +02001813 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02001815
1816 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1817 continue;
1818
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819 num++;
1820
1821 if (c->sent < min) {
1822 min = c->sent;
1823 conn = c;
1824 }
1825 }
1826
1827 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001828 int cnt, q;
1829
1830 switch (conn->type) {
1831 case ACL_LINK:
1832 cnt = hdev->acl_cnt;
1833 break;
1834 case SCO_LINK:
1835 case ESCO_LINK:
1836 cnt = hdev->sco_cnt;
1837 break;
1838 case LE_LINK:
1839 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1840 break;
1841 default:
1842 cnt = 0;
1843 BT_ERR("Unknown link type");
1844 }
1845
1846 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847 *quote = q ? q : 1;
1848 } else
1849 *quote = 0;
1850
1851 BT_DBG("conn %p quote %d", conn, *quote);
1852 return conn;
1853}
1854
Ville Tervobae1f5d2011-02-10 22:38:53 -03001855static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856{
1857 struct hci_conn_hash *h = &hdev->conn_hash;
1858 struct list_head *p;
1859 struct hci_conn *c;
1860
Ville Tervobae1f5d2011-02-10 22:38:53 -03001861 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862
1863 /* Kill stalled connections */
1864 list_for_each(p, &h->list) {
1865 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03001866 if (c->type == type && c->sent) {
1867 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001868 hdev->name, batostr(&c->dst));
1869 hci_acl_disconn(c, 0x13);
1870 }
1871 }
1872}
1873
/* Transmit queued ACL data, fairly across connections, while the
 * controller still has ACL buffer credits (hdev->acl_cnt). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* hci_low_sent() picks the connection with the fewest in-flight
	 * frames and computes its fair-share quota. */
	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller credit consumed; conn->sent is
			 * credited back when the completed-packets event
			 * arrives. */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1903
1904/* Schedule SCO */
1905static inline void hci_sched_sco(struct hci_dev *hdev)
1906{
1907 struct hci_conn *conn;
1908 struct sk_buff *skb;
1909 int quote;
1910
1911 BT_DBG("%s", hdev->name);
1912
1913 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1914 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1915 BT_DBG("skb %p len %d", skb, skb->len);
1916 hci_send_frame(skb);
1917
1918 conn->sent++;
1919 if (conn->sent == ~0)
1920 conn->sent = 0;
1921 }
1922 }
1923}
1924
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001925static inline void hci_sched_esco(struct hci_dev *hdev)
1926{
1927 struct hci_conn *conn;
1928 struct sk_buff *skb;
1929 int quote;
1930
1931 BT_DBG("%s", hdev->name);
1932
1933 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1934 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1935 BT_DBG("skb %p len %d", skb, skb->len);
1936 hci_send_frame(skb);
1937
1938 conn->sent++;
1939 if (conn->sent == ~0)
1940 conn->sent = 0;
1941 }
1942 }
1943}
1944
/* Transmit queued LE data. When the controller reports no dedicated LE
 * buffer count (le_pkts == 0), LE traffic draws on the ACL credit pool
 * instead (see the cnt selection and write-back below). */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Work on a local copy of whichever credit pool applies. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write the remaining credits back to the pool they came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979static void hci_tx_task(unsigned long arg)
1980{
1981 struct hci_dev *hdev = (struct hci_dev *) arg;
1982 struct sk_buff *skb;
1983
1984 read_lock(&hci_task_lock);
1985
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001986 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1987 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988
1989 /* Schedule queues and send stuff to HCI driver */
1990
1991 hci_sched_acl(hdev);
1992
1993 hci_sched_sco(hdev);
1994
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001995 hci_sched_esco(hdev);
1996
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001997 hci_sched_le(hdev);
1998
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999 /* Send next queued raw (unknown type) packet */
2000 while ((skb = skb_dequeue(&hdev->raw_q)))
2001 hci_send_frame(skb);
2002
2003 read_unlock(&hci_task_lock);
2004}
2005
/* ----- HCI RX task (incoming data processing) ----- */
2007
2008/* ACL data packet */
2009static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2010{
2011 struct hci_acl_hdr *hdr = (void *) skb->data;
2012 struct hci_conn *conn;
2013 __u16 handle, flags;
2014
2015 skb_pull(skb, HCI_ACL_HDR_SIZE);
2016
2017 handle = __le16_to_cpu(hdr->handle);
2018 flags = hci_flags(handle);
2019 handle = hci_handle(handle);
2020
2021 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2022
2023 hdev->stat.acl_rx++;
2024
2025 hci_dev_lock(hdev);
2026 conn = hci_conn_hash_lookup_handle(hdev, handle);
2027 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 if (conn) {
2030 register struct hci_proto *hp;
2031
Marcel Holtmann04837f62006-07-03 10:02:33 +02002032 hci_conn_enter_active_mode(conn);
2033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002035 hp = hci_proto[HCI_PROTO_L2CAP];
2036 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 hp->recv_acldata(conn, skb, flags);
2038 return;
2039 }
2040 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002041 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 hdev->name, handle);
2043 }
2044
2045 kfree_skb(skb);
2046}
2047
2048/* SCO data packet */
2049static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2050{
2051 struct hci_sco_hdr *hdr = (void *) skb->data;
2052 struct hci_conn *conn;
2053 __u16 handle;
2054
2055 skb_pull(skb, HCI_SCO_HDR_SIZE);
2056
2057 handle = __le16_to_cpu(hdr->handle);
2058
2059 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2060
2061 hdev->stat.sco_rx++;
2062
2063 hci_dev_lock(hdev);
2064 conn = hci_conn_hash_lookup_handle(hdev, handle);
2065 hci_dev_unlock(hdev);
2066
2067 if (conn) {
2068 register struct hci_proto *hp;
2069
2070 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002071 hp = hci_proto[HCI_PROTO_SCO];
2072 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 hp->recv_scodata(conn, skb);
2074 return;
2075 }
2076 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002077 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078 hdev->name, handle);
2079 }
2080
2081 kfree_skb(skb);
2082}
2083
/* RX tasklet: drain the device receive queue and dispatch each frame
 * by packet type. Runs under the shared HCI task read lock. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets (monitoring/raw
			 * listeners) before normal processing. */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack does no processing at all. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * only events are handled while initializing. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2138
/* Command tasklet: send the next queued HCI command if the controller
 * has a free command slot (cmd_cnt > 0). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously retained command, if any;
		 * kfree_skb(NULL) is a no-op. */
		kfree_skb(hdev->sent_cmd);

		/* Retain a clone of the outgoing command in sent_cmd —
		 * presumably for later matching against the command
		 * complete/status event (handled elsewhere). */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* Arm the command timeout watchdog. */
			mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (atomic allocation): put the
			 * command back at the head of the queue and
			 * retry from the tasklet. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}