blob: b6bda3fac10e2a0f821e37a76d60b509e770cf59 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
S.Çağlar Onur824530212008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/sock.h>
46
47#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
/* Delay (ms) before an auto-powered-on controller is switched off again */
#define AUTO_OFF_TIMEOUT	2000

/* Tasklet handlers driving command, RX and TX processing */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

/* Serializes the RX/TX/command tasklets against (un)registration */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (L2CAP and SCO) */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77/* ---- HCI notifications ---- */
78
79int hci_register_notifier(struct notifier_block *nb)
80{
Alan Sterne041c682006-03-27 01:16:30 -080081 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082}
83
84int hci_unregister_notifier(struct notifier_block *nb)
85{
Alan Sterne041c682006-03-27 01:16:30 -080086 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
Marcel Holtmann65164552005-10-28 19:20:48 +020089static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070090{
Alan Sterne041c682006-03-27 01:16:30 -080091 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070092}
93
94/* ---- HCI requests ---- */
95
Johan Hedberg23bb5762010-12-21 23:01:27 +020096void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Johan Hedberg23bb5762010-12-21 23:01:27 +020098 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
Johan Hedberga5040ef2011-01-10 13:28:59 +0200100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200104 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
110 }
111}
112
113static void hci_req_cancel(struct hci_dev *hdev, int err)
114{
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122}
123
124/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900125static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100126 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127{
128 DECLARE_WAITQUEUE(wait, current);
129 int err = 0;
130
131 BT_DBG("%s start", hdev->name);
132
133 hdev->req_status = HCI_REQ_PEND;
134
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
137
138 req(hdev, opt);
139 schedule_timeout(timeout);
140
141 remove_wait_queue(&hdev->req_wait_q, &wait);
142
143 if (signal_pending(current))
144 return -EINTR;
145
146 switch (hdev->req_status) {
147 case HCI_REQ_DONE:
148 err = -bt_err(hdev->req_result);
149 break;
150
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
153 break;
154
155 default:
156 err = -ETIMEDOUT;
157 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700158 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
Johan Hedberga5040ef2011-01-10 13:28:59 +0200160 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
162 BT_DBG("%s end: err %d", hdev->name, err);
163
164 return err;
165}
166
167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100168 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700169{
170 int ret;
171
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181}
182
183static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
184{
185 BT_DBG("%s %ld", hdev->name, opt);
186
187 /* Reset device */
Gustavo F. Padovan10572132011-03-16 15:36:29 -0300188 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190}
191
192static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
193{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200194 struct hci_cp_delete_stored_link_key cp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800196 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200197 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198
199 BT_DBG("%s %ld", hdev->name, opt);
200
201 /* Driver initialization */
202
203 /* Special commands */
204 while ((skb = skb_dequeue(&hdev->driver_init))) {
Marcel Holtmann0d48d932005-08-09 20:30:28 -0700205 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100207
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100209 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700210 }
211 skb_queue_purge(&hdev->driver_init);
212
213 /* Mandatory initialization */
214
215 /* Reset */
Gustavo F. Padovan10572132011-03-16 15:36:29 -0300216 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
217 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200218 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovan10572132011-03-16 15:36:29 -0300219 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220
221 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200224 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200226
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200228 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
230#if 0
231 /* Host buffer size */
232 {
233 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700234 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700236 cp.acl_max_pkt = cpu_to_le16(0xffff);
237 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200238 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700239 }
240#endif
241
242 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
244
245 /* Read Class of Device */
246 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
247
248 /* Read Local Name */
249 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700250
251 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200252 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
254 /* Optional initialization */
255
256 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200257 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200258 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700261 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200263
264 bacpy(&cp.bdaddr, BDADDR_ANY);
265 cp.delete_all = 1;
266 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700267}
268
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300269static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
270{
271 BT_DBG("%s", hdev->name);
272
273 /* Read LE buffer size */
274 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
275}
276
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
278{
279 __u8 scan = opt;
280
281 BT_DBG("%s %x", hdev->name, scan);
282
283 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200284 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700285}
286
287static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
288{
289 __u8 auth = opt;
290
291 BT_DBG("%s %x", hdev->name, auth);
292
293 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200294 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295}
296
297static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
298{
299 __u8 encrypt = opt;
300
301 BT_DBG("%s %x", hdev->name, encrypt);
302
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200303 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200304 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305}
306
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200307static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
308{
309 __le16 policy = cpu_to_le16(opt);
310
Marcel Holtmanna418b892008-11-30 12:17:28 +0100311 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200312
313 /* Default link policy */
314 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
315}
316
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900317/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700318 * Device is held on return. */
319struct hci_dev *hci_dev_get(int index)
320{
321 struct hci_dev *hdev = NULL;
322 struct list_head *p;
323
324 BT_DBG("%d", index);
325
326 if (index < 0)
327 return NULL;
328
329 read_lock(&hci_dev_list_lock);
330 list_for_each(p, &hci_dev_list) {
331 struct hci_dev *d = list_entry(p, struct hci_dev, list);
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
334 break;
335 }
336 }
337 read_unlock(&hci_dev_list_lock);
338 return hdev;
339}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341/* ---- Inquiry support ---- */
342static void inquiry_cache_flush(struct hci_dev *hdev)
343{
344 struct inquiry_cache *cache = &hdev->inq_cache;
345 struct inquiry_entry *next = cache->list, *e;
346
347 BT_DBG("cache %p", cache);
348
349 cache->list = NULL;
350 while ((e = next)) {
351 next = e->next;
352 kfree(e);
353 }
354}
355
356struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
357{
358 struct inquiry_cache *cache = &hdev->inq_cache;
359 struct inquiry_entry *e;
360
361 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
362
363 for (e = cache->list; e; e = e->next)
364 if (!bacmp(&e->data.bdaddr, bdaddr))
365 break;
366 return e;
367}
368
369void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
370{
371 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200372 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
375
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200376 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
377 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200379 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
380 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200382
383 ie->next = cache->list;
384 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385 }
386
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200387 memcpy(&ie->data, data, sizeof(*data));
388 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 cache->timestamp = jiffies;
390}
391
392static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
393{
394 struct inquiry_cache *cache = &hdev->inq_cache;
395 struct inquiry_info *info = (struct inquiry_info *) buf;
396 struct inquiry_entry *e;
397 int copied = 0;
398
399 for (e = cache->list; e && copied < num; e = e->next, copied++) {
400 struct inquiry_data *data = &e->data;
401 bacpy(&info->bdaddr, &data->bdaddr);
402 info->pscan_rep_mode = data->pscan_rep_mode;
403 info->pscan_period_mode = data->pscan_period_mode;
404 info->pscan_mode = data->pscan_mode;
405 memcpy(info->dev_class, data->dev_class, 3);
406 info->clock_offset = data->clock_offset;
407 info++;
408 }
409
410 BT_DBG("cache %p, copied %d", cache, copied);
411 return copied;
412}
413
414static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
415{
416 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
417 struct hci_cp_inquiry cp;
418
419 BT_DBG("%s", hdev->name);
420
421 if (test_bit(HCI_INQUIRY, &hdev->flags))
422 return;
423
424 /* Start Inquiry */
425 memcpy(&cp.lap, &ir->lap, 3);
426 cp.length = ir->length;
427 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200428 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429}
430
431int hci_inquiry(void __user *arg)
432{
433 __u8 __user *ptr = arg;
434 struct hci_inquiry_req ir;
435 struct hci_dev *hdev;
436 int err = 0, do_inquiry = 0, max_rsp;
437 long timeo;
438 __u8 *buf;
439
440 if (copy_from_user(&ir, ptr, sizeof(ir)))
441 return -EFAULT;
442
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200443 hdev = hci_dev_get(ir.dev_id);
444 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445 return -ENODEV;
446
447 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900448 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200449 inquiry_cache_empty(hdev) ||
450 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451 inquiry_cache_flush(hdev);
452 do_inquiry = 1;
453 }
454 hci_dev_unlock_bh(hdev);
455
Marcel Holtmann04837f62006-07-03 10:02:33 +0200456 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200457
458 if (do_inquiry) {
459 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
460 if (err < 0)
461 goto done;
462 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463
464 /* for unlimited number of responses we will use buffer with 255 entries */
465 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
466
467 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
468 * copy it to the user space.
469 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100470 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200471 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472 err = -ENOMEM;
473 goto done;
474 }
475
476 hci_dev_lock_bh(hdev);
477 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
478 hci_dev_unlock_bh(hdev);
479
480 BT_DBG("num_rsp %d", ir.num_rsp);
481
482 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
483 ptr += sizeof(ir);
484 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
485 ir.num_rsp))
486 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900487 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488 err = -EFAULT;
489
490 kfree(buf);
491
492done:
493 hci_dev_put(hdev);
494 return err;
495}
496
497/* ---- HCI ioctl helpers ---- */
498
499int hci_dev_open(__u16 dev)
500{
501 struct hci_dev *hdev;
502 int ret = 0;
503
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200504 hdev = hci_dev_get(dev);
505 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 return -ENODEV;
507
508 BT_DBG("%s %p", hdev->name, hdev);
509
510 hci_req_lock(hdev);
511
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200512 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
513 ret = -ERFKILL;
514 goto done;
515 }
516
Linus Torvalds1da177e2005-04-16 15:20:36 -0700517 if (test_bit(HCI_UP, &hdev->flags)) {
518 ret = -EALREADY;
519 goto done;
520 }
521
522 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
523 set_bit(HCI_RAW, &hdev->flags);
524
Marcel Holtmann943da252010-02-13 02:28:41 +0100525 /* Treat all non BR/EDR controllers as raw devices for now */
526 if (hdev->dev_type != HCI_BREDR)
527 set_bit(HCI_RAW, &hdev->flags);
528
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529 if (hdev->open(hdev)) {
530 ret = -EIO;
531 goto done;
532 }
533
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200537 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538
Marcel Holtmann04837f62006-07-03 10:02:33 +0200539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300542 if (lmp_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
545
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 clear_bit(HCI_INIT, &hdev->flags);
547 }
548
549 if (!ret) {
550 hci_dev_hold(hdev);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200553 if (!test_bit(HCI_SETUP, &hdev->flags))
554 mgmt_powered(hdev->id, 1);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900555 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 /* Init failed, cleanup */
557 tasklet_kill(&hdev->rx_task);
558 tasklet_kill(&hdev->tx_task);
559 tasklet_kill(&hdev->cmd_task);
560
561 skb_queue_purge(&hdev->cmd_q);
562 skb_queue_purge(&hdev->rx_q);
563
564 if (hdev->flush)
565 hdev->flush(hdev);
566
567 if (hdev->sent_cmd) {
568 kfree_skb(hdev->sent_cmd);
569 hdev->sent_cmd = NULL;
570 }
571
572 hdev->close(hdev);
573 hdev->flags = 0;
574 }
575
576done:
577 hci_req_unlock(hdev);
578 hci_dev_put(hdev);
579 return ret;
580}
581
582static int hci_dev_do_close(struct hci_dev *hdev)
583{
584 BT_DBG("%s %p", hdev->name, hdev);
585
586 hci_req_cancel(hdev, ENODEV);
587 hci_req_lock(hdev);
588
Thomas Gleixner6f5ef992011-03-24 20:16:42 +0100589 /* Stop timer, it might be running */
590 del_timer_sync(&hdev->cmd_timer);
591
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300593 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 hci_req_unlock(hdev);
595 return 0;
596 }
597
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
601
602 hci_dev_lock_bh(hdev);
603 inquiry_cache_flush(hdev);
604 hci_conn_hash_flush(hdev);
605 hci_dev_unlock_bh(hdev);
606
607 hci_notify(hdev, HCI_DEV_DOWN);
608
609 if (hdev->flush)
610 hdev->flush(hdev);
611
612 /* Reset device */
613 skb_queue_purge(&hdev->cmd_q);
614 atomic_set(&hdev->cmd_cnt, 1);
615 if (!test_bit(HCI_RAW, &hdev->flags)) {
616 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200617 __hci_request(hdev, hci_reset_req, 0,
618 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 clear_bit(HCI_INIT, &hdev->flags);
620 }
621
622 /* Kill cmd task */
623 tasklet_kill(&hdev->cmd_task);
624
625 /* Drop queues */
626 skb_queue_purge(&hdev->rx_q);
627 skb_queue_purge(&hdev->cmd_q);
628 skb_queue_purge(&hdev->raw_q);
629
630 /* Drop last sent command */
631 if (hdev->sent_cmd) {
632 kfree_skb(hdev->sent_cmd);
633 hdev->sent_cmd = NULL;
634 }
635
636 /* After this point our queues are empty
637 * and no tasks are scheduled. */
638 hdev->close(hdev);
639
Johan Hedberg5add6af2010-12-16 10:00:37 +0200640 mgmt_powered(hdev->id, 0);
641
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642 /* Clear flags */
643 hdev->flags = 0;
644
645 hci_req_unlock(hdev);
646
647 hci_dev_put(hdev);
648 return 0;
649}
650
651int hci_dev_close(__u16 dev)
652{
653 struct hci_dev *hdev;
654 int err;
655
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200656 hdev = hci_dev_get(dev);
657 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658 return -ENODEV;
659 err = hci_dev_do_close(hdev);
660 hci_dev_put(hdev);
661 return err;
662}
663
664int hci_dev_reset(__u16 dev)
665{
666 struct hci_dev *hdev;
667 int ret = 0;
668
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200669 hdev = hci_dev_get(dev);
670 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671 return -ENODEV;
672
673 hci_req_lock(hdev);
674 tasklet_disable(&hdev->tx_task);
675
676 if (!test_bit(HCI_UP, &hdev->flags))
677 goto done;
678
679 /* Drop queues */
680 skb_queue_purge(&hdev->rx_q);
681 skb_queue_purge(&hdev->cmd_q);
682
683 hci_dev_lock_bh(hdev);
684 inquiry_cache_flush(hdev);
685 hci_conn_hash_flush(hdev);
686 hci_dev_unlock_bh(hdev);
687
688 if (hdev->flush)
689 hdev->flush(hdev);
690
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900691 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300692 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693
694 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200695 ret = __hci_request(hdev, hci_reset_req, 0,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697
698done:
699 tasklet_enable(&hdev->tx_task);
700 hci_req_unlock(hdev);
701 hci_dev_put(hdev);
702 return ret;
703}
704
705int hci_dev_reset_stat(__u16 dev)
706{
707 struct hci_dev *hdev;
708 int ret = 0;
709
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200710 hdev = hci_dev_get(dev);
711 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 return -ENODEV;
713
714 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
715
716 hci_dev_put(hdev);
717
718 return ret;
719}
720
721int hci_dev_cmd(unsigned int cmd, void __user *arg)
722{
723 struct hci_dev *hdev;
724 struct hci_dev_req dr;
725 int err = 0;
726
727 if (copy_from_user(&dr, arg, sizeof(dr)))
728 return -EFAULT;
729
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200730 hdev = hci_dev_get(dr.dev_id);
731 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732 return -ENODEV;
733
734 switch (cmd) {
735 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200736 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
737 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738 break;
739
740 case HCISETENCRYPT:
741 if (!lmp_encrypt_capable(hdev)) {
742 err = -EOPNOTSUPP;
743 break;
744 }
745
746 if (!test_bit(HCI_AUTH, &hdev->flags)) {
747 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200748 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
749 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 if (err)
751 break;
752 }
753
Marcel Holtmann04837f62006-07-03 10:02:33 +0200754 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
755 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 break;
757
758 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200759 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 break;
762
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200763 case HCISETLINKPOL:
764 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
765 msecs_to_jiffies(HCI_INIT_TIMEOUT));
766 break;
767
768 case HCISETLINKMODE:
769 hdev->link_mode = ((__u16) dr.dev_opt) &
770 (HCI_LM_MASTER | HCI_LM_ACCEPT);
771 break;
772
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 case HCISETPTYPE:
774 hdev->pkt_type = (__u16) dr.dev_opt;
775 break;
776
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200778 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
779 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 break;
781
782 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200783 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
784 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 break;
786
787 default:
788 err = -EINVAL;
789 break;
790 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200791
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 hci_dev_put(hdev);
793 return err;
794}
795
796int hci_get_dev_list(void __user *arg)
797{
798 struct hci_dev_list_req *dl;
799 struct hci_dev_req *dr;
800 struct list_head *p;
801 int n = 0, size, err;
802 __u16 dev_num;
803
804 if (get_user(dev_num, (__u16 __user *) arg))
805 return -EFAULT;
806
807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
808 return -EINVAL;
809
810 size = sizeof(*dl) + dev_num * sizeof(*dr);
811
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200812 dl = kzalloc(size, GFP_KERNEL);
813 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 return -ENOMEM;
815
816 dr = dl->dev_req;
817
818 read_lock_bh(&hci_dev_list_lock);
819 list_for_each(p, &hci_dev_list) {
820 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200821
Linus Torvalds1da177e2005-04-16 15:20:36 -0700822 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200823
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200824 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200825
826 if (!test_bit(HCI_MGMT, &hdev->flags))
827 set_bit(HCI_PAIRABLE, &hdev->flags);
828
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 (dr + n)->dev_id = hdev->id;
830 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 if (++n >= dev_num)
833 break;
834 }
835 read_unlock_bh(&hci_dev_list_lock);
836
837 dl->dev_num = n;
838 size = sizeof(*dl) + n * sizeof(*dr);
839
840 err = copy_to_user(arg, dl, size);
841 kfree(dl);
842
843 return err ? -EFAULT : 0;
844}
845
846int hci_get_dev_info(void __user *arg)
847{
848 struct hci_dev *hdev;
849 struct hci_dev_info di;
850 int err = 0;
851
852 if (copy_from_user(&di, arg, sizeof(di)))
853 return -EFAULT;
854
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200855 hdev = hci_dev_get(di.dev_id);
856 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 return -ENODEV;
858
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200859 hci_del_off_timer(hdev);
860
Johan Hedbergc542a062011-01-26 13:11:03 +0200861 if (!test_bit(HCI_MGMT, &hdev->flags))
862 set_bit(HCI_PAIRABLE, &hdev->flags);
863
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 strcpy(di.name, hdev->name);
865 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +0100866 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 di.flags = hdev->flags;
868 di.pkt_type = hdev->pkt_type;
869 di.acl_mtu = hdev->acl_mtu;
870 di.acl_pkts = hdev->acl_pkts;
871 di.sco_mtu = hdev->sco_mtu;
872 di.sco_pkts = hdev->sco_pkts;
873 di.link_policy = hdev->link_policy;
874 di.link_mode = hdev->link_mode;
875
876 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
877 memcpy(&di.features, &hdev->features, sizeof(di.features));
878
879 if (copy_to_user(arg, &di, sizeof(di)))
880 err = -EFAULT;
881
882 hci_dev_put(hdev);
883
884 return err;
885}
886
887/* ---- Interface to HCI drivers ---- */
888
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200889static int hci_rfkill_set_block(void *data, bool blocked)
890{
891 struct hci_dev *hdev = data;
892
893 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
894
895 if (!blocked)
896 return 0;
897
898 hci_dev_do_close(hdev);
899
900 return 0;
901}
902
903static const struct rfkill_ops hci_rfkill_ops = {
904 .set_block = hci_rfkill_set_block,
905};
906
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907/* Alloc HCI device */
908struct hci_dev *hci_alloc_dev(void)
909{
910 struct hci_dev *hdev;
911
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200912 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 if (!hdev)
914 return NULL;
915
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916 skb_queue_head_init(&hdev->driver_init);
917
918 return hdev;
919}
920EXPORT_SYMBOL(hci_alloc_dev);
921
/* Free HCI device allocated by hci_alloc_dev().
 * Purges any skbs still queued for driver init; the structure itself
 * is freed by the device core's release callback once the last
 * reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
931
/* Deferred power-on work: open the adapter and handle the automatic
 * power management done for freshly registered controllers. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* If the device was only powered on for setup, arm the timer
	 * that powers it back off unless userspace claims it in time */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
					jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: announce the new
	 * controller index to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
948
/* Deferred power-off work, queued from the auto-off timer (the close
 * may sleep, so it cannot run in timer context). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
957
/* Auto power-off timer callback (runs in timer/softirq context):
 * clear the auto-off state and defer the sleeping close to the
 * device workqueue. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
968
/* Cancel a pending automatic power-off, e.g. once userspace has
 * taken ownership of the adapter. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
976
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200977int hci_uuids_clear(struct hci_dev *hdev)
978{
979 struct list_head *p, *n;
980
981 list_for_each_safe(p, n, &hdev->uuids) {
982 struct bt_uuid *uuid;
983
984 uuid = list_entry(p, struct bt_uuid, list);
985
986 list_del(p);
987 kfree(uuid);
988 }
989
990 return 0;
991}
992
Johan Hedberg55ed8ca12011-01-17 14:41:05 +0200993int hci_link_keys_clear(struct hci_dev *hdev)
994{
995 struct list_head *p, *n;
996
997 list_for_each_safe(p, n, &hdev->link_keys) {
998 struct link_key *key;
999
1000 key = list_entry(p, struct link_key, list);
1001
1002 list_del(p);
1003 kfree(key);
1004 }
1005
1006 return 0;
1007}
1008
1009struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1010{
1011 struct list_head *p;
1012
1013 list_for_each(p, &hdev->link_keys) {
1014 struct link_key *k;
1015
1016 k = list_entry(p, struct link_key, list);
1017
1018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1019 return k;
1020 }
1021
1022 return NULL;
1023}
1024
/* Decide whether a link key may be stored persistently.
 * Returns 1 when the key should survive across power cycles, 0 when
 * it must be discarded after the current session. @conn may be NULL
 * (security mode 3 pairing, no connection context). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key (types 0x00-0x02, pre-SSP pairing): always stored */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1060
/* Add or update the link key stored for @bdaddr.
 * @new_key: nonzero when the key came from a fresh Link Key
 * Notification event (as opposed to being loaded from storage); only
 * then is mgmt notified and persistence evaluated.
 * Returns 0 on success or -ENOMEM. Caller must hold the device lock. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key inherits the previous key's type so
	 * the original pairing semantics are preserved */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are reported to mgmt but not kept in the
	 * kernel list */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1115
/* Delete the stored link key for @bdaddr.
 * Returns 0 on success or -ENOENT when no key is stored.
 * Caller must hold the device lock. */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
1131
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time. Restore one command credit and
 * kick the command tasklet so the queue can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	/* A timed-out HCI_Reset must not block future resets */
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1142
/* Look up stored out-of-band pairing data for @bdaddr.
 * Returns the entry or NULL. Caller must hold the device lock. */
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}
1154
/* Delete the stored OOB data for @bdaddr.
 * Returns 0 on success or -ENOENT when none is stored.
 * Caller must hold the device lock. */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}
1170
/* Remove and free every stored OOB data entry on the device.
 * Caller must hold the device lock. Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1182
/* Store (or overwrite) out-of-band pairing data for @bdaddr.
 * @hash/@randomizer: C and R values from the remote device.
 * Returns 0 on success or -ENOMEM. Caller must hold the device lock. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* GFP_ATOMIC: may be called from non-sleeping context */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1206
/* Register HCI device.
 * Assigns the first free hciN id, initializes all per-device state,
 * creates the workqueue/sysfs/rfkill plumbing and schedules the
 * initial power-on. Returns the assigned id (>= 0) on success,
 * -EINVAL for an incomplete driver, or -ENOMEM. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap (or the tail) is the slot to use */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Sleeping allocations below must happen outside the list lock */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is optional: registration failure is non-fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on for setup, then auto-off unless userspace claims it */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1314
/* Unregister HCI device.
 * Reverses hci_register_dev(): unlink, close, tear down rfkill,
 * sysfs, timers and workqueue, purge per-device storage and drop
 * the initial reference. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced (i.e.
	 * the device completed setup and was not mid-init) */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Purge per-device storage under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1360
/* Suspend HCI device: notify registered listeners only; the driver
 * handles the actual hardware state. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1368
/* Resume HCI device: notify registered listeners only; the driver
 * handles the actual hardware state. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1376
/* Receive frame from HCI drivers.
 * Takes ownership of @skb (skb->dev must point at the hci_dev).
 * Frames are dropped with -ENXIO unless the device is up or in init;
 * otherwise they are timestamped and queued for the rx tasklet. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1400
/* Reassemble a (possibly partial) HCI packet from a driver buffer.
 * @type:  HCI packet type (ACL/SCO/event)
 * @data:  raw bytes from the driver, @count bytes long
 * @index: reassembly slot in hdev->reassembly[] to use
 * Returns the number of input bytes NOT yet consumed (>= 0), or a
 * negative errno (-EILSEQ for a bad type/index, -ENOMEM on alloc or
 * oversized-payload failure). Partial state is kept in the slot
 * between calls; a completed frame is handed to hci_recv_frame(). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Fresh frame: allocate the maximum-size buffer for this
		 * packet type; expect just the header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and extend the expectation; drop the frame if
		 * the advertised payload exceeds the buffer */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1509
Marcel Holtmannef222012007-07-11 06:42:04 +02001510int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1511{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301512 int rem = 0;
1513
Marcel Holtmannef222012007-07-11 06:42:04 +02001514 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1515 return -EILSEQ;
1516
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001517 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001518 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301519 if (rem < 0)
1520 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001521
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301522 data += (count - rem);
1523 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001524 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001525
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301526 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001527}
1528EXPORT_SYMBOL(hci_recv_fragment);
1529
Suraj Sumangala99811512010-07-14 13:02:19 +05301530#define STREAM_REASSEMBLY 0
1531
1532int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1533{
1534 int type;
1535 int rem = 0;
1536
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001537 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301538 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1539
1540 if (!skb) {
1541 struct { char type; } *pkt;
1542
1543 /* Start of the frame */
1544 pkt = data;
1545 type = pkt->type;
1546
1547 data++;
1548 count--;
1549 } else
1550 type = bt_cb(skb)->pkt_type;
1551
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001552 rem = hci_reassembly(hdev, type, data, count,
1553 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301554 if (rem < 0)
1555 return rem;
1556
1557 data += (count - rem);
1558 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001559 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301560
1561 return rem;
1562}
1563EXPORT_SYMBOL(hci_recv_stream_fragment);
1564
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565/* ---- Interface to upper protocols ---- */
1566
1567/* Register/Unregister protocols.
1568 * hci_task_lock is used to ensure that no tasks are running. */
/* Register an upper-layer protocol in its hci_proto slot.
 * Returns 0, -EINVAL for an out-of-range id, or -EEXIST when the
 * slot is already taken. hci_task_lock excludes running tasklets. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
1590
/* Unregister an upper-layer protocol from its hci_proto slot.
 * Returns 0, -EINVAL for an out-of-range id, or -ENOENT when the
 * slot is already empty. hci_task_lock excludes running tasklets. */
int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);
1612
/* Add a callback structure to the global HCI callback list.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1624
/* Remove a callback structure from the global HCI callback list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1636
/* Hand one fully-built frame to the driver (skb->dev holds the
 * hci_dev). Consumes @skb. A copy is mirrored to monitoring sockets
 * when any are listening (promisc). Returns the driver's result or
 * -ENODEV. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1660
/* Send HCI command.
 * Builds a command packet (header + @plen parameter bytes) and queues
 * it for the command tasklet. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during init so the init state
	 * machine can resume after a response */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
/* Get data from the previously sent command.
 * Returns a pointer to the parameter bytes of the last sent command
 * if its opcode matches @opcode, otherwise NULL. The pointer aliases
 * hdev->sent_cmd and is only valid until the next command is sent. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1714
/* Send ACL data */
/* Prepend an ACL header (handle+flags, little-endian length) to @skb */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1727
/* Queue an ACL frame (possibly pre-fragmented via skb frag_list) for
 * @conn and kick the tx tasklet. The first fragment keeps @flags;
 * continuation fragments are re-marked ACL_CONT. Consumes @skb. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically so the tx task never
		 * sees a frame with some fragments missing */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1776
/* Send SCO data */
/* Prepend a SCO header, queue the frame on @conn and kick the tx
 * tasklet. Consumes @skb. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1799
1800/* ---- HCI TX task (outgoing data) ---- */
1801
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling), and compute its share of the
 * available controller buffer credits in *quote. Returns NULL (and
 * *quote = 0) when no eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-recently-served connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Credits come from the buffer pool matching the link type;
		 * LE links fall back to the ACL pool when the controller
		 * has no dedicated LE buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split credits evenly; every connection gets at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1857
Ville Tervobae1f5d92011-02-10 22:38:53 -03001858static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859{
1860 struct hci_conn_hash *h = &hdev->conn_hash;
1861 struct list_head *p;
1862 struct hci_conn *c;
1863
Ville Tervobae1f5d92011-02-10 22:38:53 -03001864 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
1866 /* Kill stalled connections */
1867 list_for_each(p, &h->list) {
1868 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d92011-02-10 22:38:53 -03001869 if (c->type == type && c->sent) {
1870 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 hdev->name, batostr(&c->dst));
1872 hci_acl_disconn(c, 0x13);
1873 }
1874 }
1875}
1876
1877static inline void hci_sched_acl(struct hci_dev *hdev)
1878{
1879 struct hci_conn *conn;
1880 struct sk_buff *skb;
1881 int quote;
1882
1883 BT_DBG("%s", hdev->name);
1884
1885 if (!test_bit(HCI_RAW, &hdev->flags)) {
1886 /* ACL tx timeout must be longer than maximum
1887 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur824530212008-02-17 23:25:57 -08001888 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03001889 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 }
1891
1892 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1893 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1894 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02001895
1896 hci_conn_enter_active_mode(conn);
1897
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 hci_send_frame(skb);
1899 hdev->acl_last_tx = jiffies;
1900
1901 hdev->acl_cnt--;
1902 conn->sent++;
1903 }
1904 }
1905}
1906
1907/* Schedule SCO */
1908static inline void hci_sched_sco(struct hci_dev *hdev)
1909{
1910 struct hci_conn *conn;
1911 struct sk_buff *skb;
1912 int quote;
1913
1914 BT_DBG("%s", hdev->name);
1915
1916 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1917 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1918 BT_DBG("skb %p len %d", skb, skb->len);
1919 hci_send_frame(skb);
1920
1921 conn->sent++;
1922 if (conn->sent == ~0)
1923 conn->sent = 0;
1924 }
1925 }
1926}
1927
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001928static inline void hci_sched_esco(struct hci_dev *hdev)
1929{
1930 struct hci_conn *conn;
1931 struct sk_buff *skb;
1932 int quote;
1933
1934 BT_DBG("%s", hdev->name);
1935
1936 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1937 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1938 BT_DBG("skb %p len %d", skb, skb->len);
1939 hci_send_frame(skb);
1940
1941 conn->sent++;
1942 if (conn->sent == ~0)
1943 conn->sent = 0;
1944 }
1945 }
1946}
1947
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001948static inline void hci_sched_le(struct hci_dev *hdev)
1949{
1950 struct hci_conn *conn;
1951 struct sk_buff *skb;
1952 int quote, cnt;
1953
1954 BT_DBG("%s", hdev->name);
1955
1956 if (!test_bit(HCI_RAW, &hdev->flags)) {
1957 /* LE tx timeout must be longer than maximum
1958 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03001959 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001960 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03001961 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001962 }
1963
1964 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1965 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1966 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1967 BT_DBG("skb %p len %d", skb, skb->len);
1968
1969 hci_send_frame(skb);
1970 hdev->le_last_tx = jiffies;
1971
1972 cnt--;
1973 conn->sent++;
1974 }
1975 }
1976 if (hdev->le_pkts)
1977 hdev->le_cnt = cnt;
1978 else
1979 hdev->acl_cnt = cnt;
1980}
1981
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982static void hci_tx_task(unsigned long arg)
1983{
1984 struct hci_dev *hdev = (struct hci_dev *) arg;
1985 struct sk_buff *skb;
1986
1987 read_lock(&hci_task_lock);
1988
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001989 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1990 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001991
1992 /* Schedule queues and send stuff to HCI driver */
1993
1994 hci_sched_acl(hdev);
1995
1996 hci_sched_sco(hdev);
1997
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001998 hci_sched_esco(hdev);
1999
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002000 hci_sched_le(hdev);
2001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 /* Send next queued raw (unknown type) packet */
2003 while ((skb = skb_dequeue(&hdev->raw_q)))
2004 hci_send_frame(skb);
2005
2006 read_unlock(&hci_task_lock);
2007}
2008
2009/* ----- HCI RX task (incoming data proccessing) ----- */
2010
2011/* ACL data packet */
2012static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2013{
2014 struct hci_acl_hdr *hdr = (void *) skb->data;
2015 struct hci_conn *conn;
2016 __u16 handle, flags;
2017
2018 skb_pull(skb, HCI_ACL_HDR_SIZE);
2019
2020 handle = __le16_to_cpu(hdr->handle);
2021 flags = hci_flags(handle);
2022 handle = hci_handle(handle);
2023
2024 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2025
2026 hdev->stat.acl_rx++;
2027
2028 hci_dev_lock(hdev);
2029 conn = hci_conn_hash_lookup_handle(hdev, handle);
2030 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002031
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 if (conn) {
2033 register struct hci_proto *hp;
2034
Marcel Holtmann04837f62006-07-03 10:02:33 +02002035 hci_conn_enter_active_mode(conn);
2036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002038 hp = hci_proto[HCI_PROTO_L2CAP];
2039 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 hp->recv_acldata(conn, skb, flags);
2041 return;
2042 }
2043 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002044 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045 hdev->name, handle);
2046 }
2047
2048 kfree_skb(skb);
2049}
2050
2051/* SCO data packet */
2052static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2053{
2054 struct hci_sco_hdr *hdr = (void *) skb->data;
2055 struct hci_conn *conn;
2056 __u16 handle;
2057
2058 skb_pull(skb, HCI_SCO_HDR_SIZE);
2059
2060 handle = __le16_to_cpu(hdr->handle);
2061
2062 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2063
2064 hdev->stat.sco_rx++;
2065
2066 hci_dev_lock(hdev);
2067 conn = hci_conn_hash_lookup_handle(hdev, handle);
2068 hci_dev_unlock(hdev);
2069
2070 if (conn) {
2071 register struct hci_proto *hp;
2072
2073 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002074 hp = hci_proto[HCI_PROTO_SCO];
2075 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 hp->recv_scodata(conn, skb);
2077 return;
2078 }
2079 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002080 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 hdev->name, handle);
2082 }
2083
2084 kfree_skb(skb);
2085}
2086
Marcel Holtmann65164552005-10-28 19:20:48 +02002087static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088{
2089 struct hci_dev *hdev = (struct hci_dev *) arg;
2090 struct sk_buff *skb;
2091
2092 BT_DBG("%s", hdev->name);
2093
2094 read_lock(&hci_task_lock);
2095
2096 while ((skb = skb_dequeue(&hdev->rx_q))) {
2097 if (atomic_read(&hdev->promisc)) {
2098 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002099 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
2101
2102 if (test_bit(HCI_RAW, &hdev->flags)) {
2103 kfree_skb(skb);
2104 continue;
2105 }
2106
2107 if (test_bit(HCI_INIT, &hdev->flags)) {
2108 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002109 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 case HCI_ACLDATA_PKT:
2111 case HCI_SCODATA_PKT:
2112 kfree_skb(skb);
2113 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 }
2116
2117 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002118 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 case HCI_EVENT_PKT:
2120 hci_event_packet(hdev, skb);
2121 break;
2122
2123 case HCI_ACLDATA_PKT:
2124 BT_DBG("%s ACL data packet", hdev->name);
2125 hci_acldata_packet(hdev, skb);
2126 break;
2127
2128 case HCI_SCODATA_PKT:
2129 BT_DBG("%s SCO data packet", hdev->name);
2130 hci_scodata_packet(hdev, skb);
2131 break;
2132
2133 default:
2134 kfree_skb(skb);
2135 break;
2136 }
2137 }
2138
2139 read_unlock(&hci_task_lock);
2140}
2141
2142static void hci_cmd_task(unsigned long arg)
2143{
2144 struct hci_dev *hdev = (struct hci_dev *) arg;
2145 struct sk_buff *skb;
2146
2147 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2148
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002150 if (atomic_read(&hdev->cmd_cnt)) {
2151 skb = skb_dequeue(&hdev->cmd_q);
2152 if (!skb)
2153 return;
2154
Wei Yongjun7585b972009-02-25 18:29:52 +08002155 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002157 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2158 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 atomic_dec(&hdev->cmd_cnt);
2160 hci_send_frame(skb);
Ville Tervo6bd32322011-02-16 16:32:41 +02002161 mod_timer(&hdev->cmd_timer,
2162 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 } else {
2164 skb_queue_head(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002165 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 }
2167 }
2168}