/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
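
/*
 * Example (illustrative sketch, not compiled): a module could watch
 * device lifecycle events through this chain. The names
 * example_hci_event and example_nb are hypothetical.
 */
#if 0
static int example_hci_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	if (event == HCI_DEV_REG)
		BT_DBG("device %s registered", hdev->name);

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_hci_event,
};

/* hci_register_notifier(&example_nb) subscribes the callback;
 * hci_unregister_notifier(&example_nb) removes it again. */
#endif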
93
94/* ---- HCI requests ---- */
95
Johan Hedberg23bb5762010-12-21 23:01:27 +020096void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Johan Hedberg23bb5762010-12-21 23:01:27 +020098 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
100 /* If the request has set req_last_cmd (typical for multi-HCI
101 * command requests) check if the completed command matches
102 * this, and if not just return. Single HCI command requests
103 * typically leave req_last_cmd as 0 */
104 if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
105 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900126static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127 unsigned long opt, __u32 timeout)
128{
129 DECLARE_WAITQUEUE(wait, current);
130 int err = 0;
131
132 BT_DBG("%s start", hdev->name);
133
134 hdev->req_status = HCI_REQ_PEND;
135
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
138
139 req(hdev, opt);
140 schedule_timeout(timeout);
141
142 remove_wait_queue(&hdev->req_wait_q, &wait);
143
144 if (signal_pending(current))
145 return -EINTR;
146
147 switch (hdev->req_status) {
148 case HCI_REQ_DONE:
149 err = -bt_err(hdev->req_result);
150 break;
151
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
154 break;
155
156 default:
157 err = -ETIMEDOUT;
158 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700159 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160
Johan Hedberg23bb5762010-12-21 23:01:27 +0200161 hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162
163 BT_DBG("%s end: err %d", hdev->name, err);
164
165 return err;
166}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
170{
171 int ret;
172
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
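
/*
 * Example (sketch): callers hand hci_request() one of the req helpers
 * below together with its argument; the helper queues the HCI commands
 * and __hci_request() sleeps until hci_req_complete() is called from
 * the event path or the timeout expires. E.g.:
 */
#if 0
	err = hci_request(hdev, hci_reset_req, 0,
				msecs_to_jiffies(HCI_INIT_TIMEOUT));
#endif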

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
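
/*
 * Example (sketch): every successful hci_dev_get() must be balanced
 * with hci_dev_put() once the caller is done with the device:
 */
#if 0
	struct hci_dev *hdev = hci_dev_get(0);	/* look up hci0 */

	if (hdev) {
		/* ... use hdev ... */
		hci_dev_put(hdev);
	}
#endif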

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
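
/*
 * Example (sketch): the inquiry-result event handler fills a struct
 * inquiry_data from the controller's response and feeds it to the
 * cache under the device lock (field copying abbreviated):
 */
#if 0
	struct inquiry_data data;

	bacpy(&data.bdaddr, &info->bdaddr);
	data.pscan_rep_mode = info->pscan_rep_mode;
	/* ... remaining fields from the inquiry_info ... */

	hci_dev_lock(hdev);
	hci_inquiry_cache_update(hdev, &data);
	hci_dev_unlock(hdev);
#endif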

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
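
/*
 * User-space view (hypothetical program, not kernel code): the
 * function above backs the HCIINQUIRY ioctl on an HCI socket, with
 * the inquiry_info results appended after the request header:
 */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf = {
		.ir = {
			.dev_id  = 0,
			.flags   = IREQ_CACHE_FLUSH,
			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
			.length  = 8,	/* 8 * 1.28 s inquiry */
			.num_rsp = 0,	/* unlimited, capped at 255 */
		},
	};

	ioctl(dd, HCIINQUIRY, &buf);	/* dd: open HCI socket */
#endif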

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

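/*
 * Power-on/auto-off flow: hci_power_on brings the device up and, while
 * it is still unconfigured (HCI_AUTO_OFF set), arms off_timer. If user
 * space does not claim the device within AUTO_OFF_TIMEOUT ms,
 * hci_auto_off fires and queues hci_power_off, which shuts the device
 * back down. hci_del_off_timer cancels the countdown.
 */
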
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
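
/*
 * Sketch of the driver side (illustrative only; the example_* names
 * are hypothetical): allocate a device, fill in the mandatory
 * callbacks checked above, then register it.
 */
#if 0
static int example_probe(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_VIRTUAL;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		return -EBUSY;
	}

	return 0;
}
#endif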

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
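
/*
 * Sketch: a driver delivers a complete packet by tagging it with its
 * type and owning device before calling hci_recv_frame():
 */
#if 0
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_recv_frame(skb);
#endif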

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
			index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
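
/*
 * Sketch: a driver that learns the packet type out of band can feed
 * partial buffers as they arrive and let the core reassemble them:
 */
#if 0
	hci_recv_fragment(hdev, HCI_ACLDATA_PKT, data, count);
#endif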

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
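
/*
 * Sketch: a UART-style driver can instead push the raw byte stream
 * (packet-type prefix included) straight from its receive path:
 */
#if 0
	hci_recv_stream_fragment(hdev, data, count);
#endif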

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);
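
/*
 * Sketch of the upper-protocol side (l2cap.c and sco.c do this for
 * real; the example_* names are hypothetical):
 */
#if 0
static struct hci_proto example_proto = {
	.name		= "EXAMPLE",
	.id		= HCI_PROTO_L2CAP,
	.recv_acldata	= example_recv_acldata,
};

/* hci_register_proto(&example_proto); */
#endif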

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
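
/*
 * Example (sketch, mirroring hci_scan_req above): a command with a
 * one-byte parameter is queued like this.
 */
#if 0
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
#endif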

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
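
/*
 * Example: with hdev->acl_cnt == 9 free controller buffers and three
 * ACL connections carrying data, the least-used connection is picked
 * and may send up to 9 / 3 == 3 packets in this pass; a quotient of
 * zero is rounded up to 1 so a busy link never starves completely.
 */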

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}