/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

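/* A request callback only queues HCI commands; the synchronization happens
 * in __hci_request() above, which sleeps until hci_req_complete() wakes it.
 * As a minimal sketch of the pattern (HCI_OP_MYCMD and hci_mycmd_req are
 * hypothetical, not defined anywhere in this file), a new request would be:
 *
 *	static void hci_mycmd_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 mode = opt;
 *		hci_send_cmd(hdev, HCI_OP_MYCMD, 1, &mode);
 *	}
 *
 * driven via hci_request(hdev, hci_mycmd_req, mode, timeout). The callbacks
 * below all follow this shape.
 */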
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep, so we allocate a temporary buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

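/* hci_inquiry() above backs the HCIINQUIRY ioctl on an HCI socket. As an
 * illustrative userspace sketch only (hci_sock_fd is an assumed, already
 * opened HCI socket; the LAP bytes encode the general inquiry access code
 * 0x9e8b33 little-endian):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e },
 *			  .length = 8, .num_rsp = 255 } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */
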
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

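/* Power-on/auto-off lifecycle as implemented above: hci_register_dev() sets
 * HCI_AUTO_OFF and queues power_on; hci_power_on() opens the device and arms
 * off_timer for AUTO_OFF_TIMEOUT ms; if userspace shows interest in the
 * meantime (hci_get_dev_list() and hci_get_dev_info() both call
 * hci_del_off_timer()), the timer is cancelled, otherwise hci_auto_off()
 * powers the device back down via the power_off work.
 */
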
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

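/* Reassemble a packet of the given type from a raw byte stream. The skb in
 * hdev->reassembly[index] carries its parser state in skb->cb: scb->expect
 * counts the bytes still missing, first for the packet header and then,
 * once the header is complete and the payload length is known, for the
 * payload. Returns the number of input bytes left over after a complete
 * frame has been delivered, or a negative error.
 */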
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
					int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

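/* A transport driver would typically feed received bytes straight from its
 * interrupt or completion handler. A minimal sketch of the calling
 * convention (struct my_uart and my_uart_rx() are hypothetical, not part of
 * this file):
 *
 *	static void my_uart_rx(struct my_uart *u, u8 *buf, int len)
 *	{
 *		hci_recv_fragment(u->hdev, HCI_ACLDATA_PKT, buf, len);
 *	}
 *
 * Drivers whose transport does not preserve packet boundaries use
 * hci_recv_stream_fragment() below instead, which pulls the packet type
 * indicator byte out of the stream itself.
 */
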
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
						count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

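/* hci_send_acl() below expects the caller to have built any fragments as a
 * frag_list hanging off the first skb: the head fragment keeps the caller's
 * flags (normally ACL_START), while every continuation fragment is
 * re-flagged ACL_CONT so the controller can stitch the higher-layer PDU
 * back together.
 */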
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
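/* hci_low_sent() picks, among connections of the given type that have
 * queued data, the one with the fewest in-flight packets, and grants it a
 * quota of free controller buffers (cnt / num, at least 1). Repeated calls
 * from the hci_sched_*() loops below therefore approximate fair round-robin
 * scheduling across links.
 */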
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

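/* Command flow control: cmd_cnt holds the number of HCI commands the
 * controller is currently willing to accept (the event handler replenishes
 * it from Command Complete/Command Status events). A clone of each command
 * is kept in sent_cmd so hci_sent_cmd_data() can match the reply; if the
 * credit has been stuck at zero for more than a second, it is forced back
 * to one to recover from a lost event.
 */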
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}