/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If the request has set req_last_cmd (typical for multi-HCI
	 * command requests), check if the completed command matches
	 * this, and if not just return. Single HCI command requests
	 * typically leave req_last_cmd as 0. */
	if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

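/*
 * Convenience wrapper around __hci_request(): bails out if the device
 * is not up, and takes the per-device request mutex so that only one
 * synchronous request is in flight at a time. Like __hci_request() it
 * sleeps, so it must not be called from atomic context.
 */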
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

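/*
 * Callers own a reference on the device returned by hci_dev_get() and
 * must release it with hci_dev_put(), e.g.:
 *
 *	hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */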
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

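/*
 * HCIINQUIRY ioctl handler. ir.length is expressed in inquiry-length
 * units of 1.28 s (per the Bluetooth spec); the request below is given
 * a generous 2000 ms per unit so the wait comfortably outlives the
 * inquiry itself.
 */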
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer of 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
	 * and copy it to user space afterwards.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;
		hdev = list_entry(p, struct hci_dev, list);
		hci_del_off_timer(hdev);
		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;
		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

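/*
 * Auto-power-off flow: hci_power_on() arms off_timer for a freshly
 * registered device; if nothing claims the device within
 * AUTO_OFF_TIMEOUT ms, hci_auto_off() fires in timer context and
 * defers the actual shutdown to the power_off work item, since
 * hci_dev_close() may sleep. Ioctls that touch the device cancel the
 * timer via hci_del_off_timer().
 */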
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

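/*
 * Typical driver usage (sketch): allocate a device with
 * hci_alloc_dev(), fill in the bus type and the open/close/flush/send
 * and destruct callbacks, then call hci_register_dev(). On removal,
 * call hci_unregister_dev() followed by hci_free_dev().
 */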
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

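/*
 * Append driver bytes to the per-index reassembly buffer. scb->expect
 * counts the bytes still missing: first the packet header, then the
 * payload length announced by that header. Returns the number of bytes
 * from this chunk left unconsumed (a complete frame was pushed to
 * hci_recv_frame()), or a negative error.
 */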
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
						type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

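/*
 * Stream transports (e.g. UART-style drivers) deliver a byte stream in
 * which every frame is prefixed with its one-byte HCI packet-type
 * indicator. A single shared reassembly slot is used, and the type
 * byte is peeled off at the start of each frame.
 */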
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

1336
1337int hci_register_cb(struct hci_cb *cb)
1338{
1339 BT_DBG("%p name %s", cb, cb->name);
1340
1341 write_lock_bh(&hci_cb_list_lock);
1342 list_add(&cb->list, &hci_cb_list);
1343 write_unlock_bh(&hci_cb_list_lock);
1344
1345 return 0;
1346}
1347EXPORT_SYMBOL(hci_register_cb);
1348
1349int hci_unregister_cb(struct hci_cb *cb)
1350{
1351 BT_DBG("%p name %s", cb, cb->name);
1352
1353 write_lock_bh(&hci_cb_list_lock);
1354 list_del(&cb->list);
1355 write_unlock_bh(&hci_cb_list_lock);
1356
1357 return 0;
1358}
1359EXPORT_SYMBOL(hci_unregister_cb);
1360
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

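/*
 * Note on fragmentation above: the head skb keeps the caller's flags
 * (normally ACL_START), while every skb on its frag_list is relabelled
 * ACL_CONT, so the controller sees one L2CAP PDU split across
 * consecutive ACL packets.
 */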
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

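/*
 * Scheduling policy: among connections of the requested type that have
 * queued data, hci_low_sent() picks the one with the fewest packets in
 * flight and grants it an equal share of the free controller buffers
 * (at least one), approximating round-robin fairness between busy
 * links.
 */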
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

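/*
 * Command scheduler. cmd_cnt is the number of commands the controller
 * will accept right now (normally 1); it is decremented when a command
 * is handed to the driver and replenished when the corresponding
 * Command Complete/Status event is processed (see hci_event.c). The
 * one-second check below unwedges the queue if a controller stops
 * responding.
 */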
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}