/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
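/*
 * A request is a callback that queues one or more HCI commands and then
 * waits for the controller's reply. hdev->req_status tracks progress:
 * HCI_REQ_PEND while in flight, then HCI_REQ_DONE (with the result in
 * hdev->req_result) or HCI_REQ_CANCELED, and waiters sleeping on
 * hdev->req_wait_q are woken by hci_req_complete()/hci_req_cancel().
 */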
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

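/*
 * Add or refresh an inquiry result: reuse the entry for this bdaddr if
 * one is already cached, otherwise prepend a new entry to the singly
 * linked cache list, then update the entry and cache timestamps.
 */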
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

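/*
 * HCIINQUIRY ioctl backend: flush the cache and run a fresh inquiry when
 * the cached results are too old, empty, or a flush was explicitly
 * requested, then copy the resulting entries back to user space.
 */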
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

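/*
 * Bring a device up: refuse if it is rfkill-blocked or already up, call
 * the driver's open(), and unless the device is marked HCI_RAW run the
 * hci_init_req command sequence (with HCI_INIT set) before HCI_UP is
 * declared and HCI_DEV_UP is broadcast to the notifier chain.
 */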
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
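/*
 * The device list is kept sorted by id, so the loop below grabs the
 * first unused index and the new device becomes hci<id>. Everything the
 * tasklets touch (queues, locks, reassembly slots, connection hash) is
 * initialized before sysfs and rfkill make the device visible.
 */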
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

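/*
 * Reassemble a partially received HCI packet. Each reassembly slot holds
 * one sk_buff in progress; scb->expect counts the bytes still missing.
 * Once a header is complete, expect is reloaded from the packet's own
 * length field, and a finished frame is handed to hci_recv_frame().
 * Returns how many input bytes were left unconsumed, or a negative error.
 */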
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

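/*
 * Stream transports (for example UART based drivers) deliver a byte
 * stream with no packet boundaries: the first byte of every frame is the
 * packet type, and a single dedicated reassembly slot serves the stream.
 */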
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

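/*
 * Queue ACL data for a connection. A fragmented skb (frag_list) is
 * flattened into the connection's data queue atomically: the head gets
 * an ACL_START header and every following fragment ACL_CONT, so the TX
 * task can send each piece as a separate HCI packet.
 */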
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
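/*
 * Pick the connection of the given link type with the fewest packets in
 * flight (c->sent) and grant it a quota that is its share of the free
 * controller buffers, so a busy link cannot starve the others.
 */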
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

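/*
 * ACL transmit watchdog: if the controller has stopped returning buffer
 * credits, assume the link is stalled and disconnect every ACL
 * connection that still has unacknowledged packets (reason 0x13).
 */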
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

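/*
 * RX tasklet: drain rx_q, mirroring every frame to promiscuous sockets,
 * dropping data packets while the device is still initializing, and
 * dispatching by packet type to the event, ACL and SCO handlers.
 */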
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

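/*
 * Command tasklet: cmd_cnt is how many commands the controller will
 * currently accept (the init code starts it at one), and a clone of the
 * command being sent is kept in sent_cmd so hci_sent_cmd_data() can
 * match its completion event. If no credit comes back for a second,
 * the counter is forcibly restored to avoid a permanent stall.
 */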
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}