blob: cedb8a966df6568b6eb53a1d2883e3ea8352c569 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020047#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
/* Delay (ms) before an automatically powered-on controller is powered
 * back off again if nothing claims it (see hci_power_on/hci_auto_off). */
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Protects the rx/tx/cmd tasklets against concurrent protocol changes */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77/* ---- HCI notifications ---- */
78
79int hci_register_notifier(struct notifier_block *nb)
80{
Alan Sterne041c682006-03-27 01:16:30 -080081 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082}
83
84int hci_unregister_notifier(struct notifier_block *nb)
85{
Alan Sterne041c682006-03-27 01:16:30 -080086 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
/* Broadcast an HCI device event for hdev to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
Johan Hedberg23bb5762010-12-21 23:01:27 +020096void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Johan Hedberg23bb5762010-12-21 23:01:27 +020098 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
99
Johan Hedberga5040ef2011-01-10 13:28:59 +0200100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
102 */
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200104 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700105
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
110 }
111}
112
113static void hci_req_cancel(struct hci_dev *hdev, int err)
114{
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
116
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
121 }
122}
123
/* Execute request and wait for completion.
 *
 * Fires req(hdev, opt) and sleeps (interruptibly, up to 'timeout'
 * jiffies) until hci_req_complete()/hci_req_cancel() wakes us.
 * All callers in this file invoke it with the request lock held
 * (hci_req_lock), which serializes requests per device.
 *
 * Returns 0 on success, a negative bt_err()-mapped HCI status on
 * completion with error, the negated cancel reason, -EINTR on signal,
 * or -ETIMEDOUT if nothing completed the request in time.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue and mark interruptible BEFORE firing the
	 * request so an immediate completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the timeout expired */
		err = -ETIMEDOUT;
		break;
	}

	/* Reset request state for the next user */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
166
167static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168 unsigned long opt, __u32 timeout)
169{
170 int ret;
171
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200172 if (!test_bit(HCI_UP, &hdev->flags))
173 return -ENETDOWN;
174
Linus Torvalds1da177e2005-04-16 15:20:36 -0700175 /* Serialize all requests */
176 hci_req_lock(hdev);
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
179
180 return ret;
181}
182
/* Request callback: queue an HCI_Reset command. 'opt' is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
190
/* Request callback: queue the full controller initialization sequence.
 *
 * Runs under HCI_INIT (set by hci_dev_open); every hci_send_cmd() here
 * records init_last_cmd so hci_req_complete() only finishes the request
 * once the final command's reply arrives. 'opt' is unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: driver-provided pre-init packets are pushed
	 * onto the command queue ahead of the standard sequence. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot survive it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Page timeout ~20 secs (0x8000 slots * 0.625 ms) */
	param = cpu_to_le16(0x8000);
	hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
269
/* Request callback: set inquiry/page scan enable to the value in 'opt'. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
279
/* Request callback: set authentication enable to the value in 'opt'. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
289
/* Request callback: set encryption mode to the value in 'opt'. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
299
/* Request callback: set the default link policy from 'opt'
 * (converted to little endian on the wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
309
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900310/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311 * Device is held on return. */
312struct hci_dev *hci_dev_get(int index)
313{
314 struct hci_dev *hdev = NULL;
315 struct list_head *p;
316
317 BT_DBG("%d", index);
318
319 if (index < 0)
320 return NULL;
321
322 read_lock(&hci_dev_list_lock);
323 list_for_each(p, &hci_dev_list) {
324 struct hci_dev *d = list_entry(p, struct hci_dev, list);
325 if (d->id == index) {
326 hdev = hci_dev_hold(d);
327 break;
328 }
329 }
330 read_unlock(&hci_dev_list_lock);
331 return hdev;
332}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700333
334/* ---- Inquiry support ---- */
335static void inquiry_cache_flush(struct hci_dev *hdev)
336{
337 struct inquiry_cache *cache = &hdev->inq_cache;
338 struct inquiry_entry *next = cache->list, *e;
339
340 BT_DBG("cache %p", cache);
341
342 cache->list = NULL;
343 while ((e = next)) {
344 next = e->next;
345 kfree(e);
346 }
347}
348
349struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
350{
351 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e;
353
354 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
355
356 for (e = cache->list; e; e = e->next)
357 if (!bacmp(&e->data.bdaddr, bdaddr))
358 break;
359 return e;
360}
361
362void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
363{
364 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200365 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366
367 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
368
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200369 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
370 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700371 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200372 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
373 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700374 return;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200375
376 ie->next = cache->list;
377 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 }
379
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200380 memcpy(&ie->data, data, sizeof(*data));
381 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700382 cache->timestamp = jiffies;
383}
384
385static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
386{
387 struct inquiry_cache *cache = &hdev->inq_cache;
388 struct inquiry_info *info = (struct inquiry_info *) buf;
389 struct inquiry_entry *e;
390 int copied = 0;
391
392 for (e = cache->list; e && copied < num; e = e->next, copied++) {
393 struct inquiry_data *data = &e->data;
394 bacpy(&info->bdaddr, &data->bdaddr);
395 info->pscan_rep_mode = data->pscan_rep_mode;
396 info->pscan_period_mode = data->pscan_period_mode;
397 info->pscan_mode = data->pscan_mode;
398 memcpy(info->dev_class, data->dev_class, 3);
399 info->clock_offset = data->clock_offset;
400 info++;
401 }
402
403 BT_DBG("cache %p, copied %d", cache, copied);
404 return copied;
405}
406
407static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
408{
409 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
410 struct hci_cp_inquiry cp;
411
412 BT_DBG("%s", hdev->name);
413
414 if (test_bit(HCI_INQUIRY, &hdev->flags))
415 return;
416
417 /* Start Inquiry */
418 memcpy(&cp.lap, &ir->lap, 3);
419 cp.length = ir->length;
420 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200421 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422}
423
/* HCIINQUIRY ioctl handler.
 *
 * Copies a struct hci_inquiry_req from user space, optionally runs a
 * fresh inquiry (when the cache is stale/empty or IREQ_CACHE_FLUSH is
 * set), then copies the updated request header plus up to max_rsp
 * cached results back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	/* Flush the cache and force a new inquiry if it is too old,
	 * empty, or the caller explicitly asked for a flush. */
	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in 1.28 s units; allow ~2 s of wall time per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp) then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
488
489/* ---- HCI ioctl helpers ---- */
490
/* Bring up the HCI device with id 'dev'.
 *
 * Returns 0 on success, -ENODEV for an unknown id, -ERFKILL when the
 * device is rfkill-blocked, -EALREADY if already up, -EIO if the driver
 * open() fails, or a negative error from the HCI init request.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* Run the init sequence under HCI_INIT so that
		 * hci_req_complete() only finishes on the last init cmd. */
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on while still in setup phase */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
569
/* Shut down hdev: cancel any pending request, stop the tasklets, flush
 * the inquiry cache and connections, reset the controller (unless raw),
 * drain all queues and call the driver's close(). Drops the reference
 * taken at open time. Returns 0 (also when the device was already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
634
635int hci_dev_close(__u16 dev)
636{
637 struct hci_dev *hdev;
638 int err;
639
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200640 hdev = hci_dev_get(dev);
641 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642 return -ENODEV;
643 err = hci_dev_do_close(hdev);
644 hci_dev_put(hdev);
645 return err;
646}
647
/* HCIDEVRESET ioctl handler: flush queues, inquiry cache and
 * connections of device 'dev', then (unless raw) issue an HCI reset.
 * A no-op (returning 0) when the device is not up.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	/* Keep the TX tasklet from running while we purge its queues */
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing with fresh flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
688
689int hci_dev_reset_stat(__u16 dev)
690{
691 struct hci_dev *hdev;
692 int ret = 0;
693
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200694 hdev = hci_dev_get(dev);
695 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 return -ENODEV;
697
698 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
699
700 hci_dev_put(hdev);
701
702 return ret;
703}
704
/* Dispatcher for the HCISET* device ioctls.
 *
 * Copies a struct hci_dev_req from user space and applies the setting
 * selected by 'cmd' to the device dr.dev_id, either by running an HCI
 * request (auth/encrypt/scan/linkpol) or by updating hdev fields
 * directly. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two __u16 values: low half = packet count,
	 * high half = MTU */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
779
780int hci_get_dev_list(void __user *arg)
781{
782 struct hci_dev_list_req *dl;
783 struct hci_dev_req *dr;
784 struct list_head *p;
785 int n = 0, size, err;
786 __u16 dev_num;
787
788 if (get_user(dev_num, (__u16 __user *) arg))
789 return -EFAULT;
790
791 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
792 return -EINVAL;
793
794 size = sizeof(*dl) + dev_num * sizeof(*dr);
795
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200796 dl = kzalloc(size, GFP_KERNEL);
797 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 return -ENOMEM;
799
800 dr = dl->dev_req;
801
802 read_lock_bh(&hci_dev_list_lock);
803 list_for_each(p, &hci_dev_list) {
804 struct hci_dev *hdev;
Johan Hedbergc542a062011-01-26 13:11:03 +0200805
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 hdev = list_entry(p, struct hci_dev, list);
Johan Hedbergc542a062011-01-26 13:11:03 +0200807
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200808 hci_del_off_timer(hdev);
Johan Hedbergc542a062011-01-26 13:11:03 +0200809
810 if (!test_bit(HCI_MGMT, &hdev->flags))
811 set_bit(HCI_PAIRABLE, &hdev->flags);
812
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 (dr + n)->dev_id = hdev->id;
814 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200815
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 if (++n >= dev_num)
817 break;
818 }
819 read_unlock_bh(&hci_dev_list_lock);
820
821 dl->dev_num = n;
822 size = sizeof(*dl) + n * sizeof(*dr);
823
824 err = copy_to_user(arg, dl, size);
825 kfree(dl);
826
827 return err ? -EFAULT : 0;
828}
829
/* HCIGETDEVINFO ioctl handler: copy a filled struct hci_dev_info for
 * device di.dev_id back to user space. Returns 0, -EFAULT or -ENODEV. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device cancels its pending auto-off */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device (controller) type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
870
871/* ---- Interface to HCI drivers ---- */
872
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200873static int hci_rfkill_set_block(void *data, bool blocked)
874{
875 struct hci_dev *hdev = data;
876
877 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
878
879 if (!blocked)
880 return 0;
881
882 hci_dev_do_close(hdev);
883
884 return 0;
885}
886
/* rfkill operations for HCI devices (registered per device) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
890
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891/* Alloc HCI device */
892struct hci_dev *hci_alloc_dev(void)
893{
894 struct hci_dev *hdev;
895
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200896 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700897 if (!hdev)
898 return NULL;
899
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 skb_queue_head_init(&hdev->driver_init);
901
902 return hdev;
903}
904EXPORT_SYMBOL(hci_alloc_dev);
905
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
915
/* Workqueue handler: power the device on. If HCI_AUTO_OFF is set, arm
 * the off_timer so the device powers back down after AUTO_OFF_TIMEOUT
 * unless something claims it; announce the new index to mgmt once the
 * setup phase is over. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
932
/* Workqueue handler: power the device off (queued by hci_auto_off). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
941
/* Auto-off timer callback. Runs in timer (atomic) context, so the
 * actual close is deferred to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
952
/* Cancel a pending automatic power-off and clear its flag. */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
960
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200961int hci_uuids_clear(struct hci_dev *hdev)
962{
963 struct list_head *p, *n;
964
965 list_for_each_safe(p, n, &hdev->uuids) {
966 struct bt_uuid *uuid;
967
968 uuid = list_entry(p, struct bt_uuid, list);
969
970 list_del(p);
971 kfree(uuid);
972 }
973
974 return 0;
975}
976
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	/* Insert at the gap so the list stays sorted by id */
	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Defaults until the controller capabilities are known */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is optional: a registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power on asynchronously; hci_power_on arms the auto-off timer
	 * unless the device gets claimed in the meantime */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1077
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was previously announced,
	 * i.e. setup completed and we are not mid-initialization */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Flush per-device lists under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the initial reference taken in hci_register_dev */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1119
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only broadcasts the notification; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1127
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only broadcasts the notification; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1135
Marcel Holtmann76bca882009-11-18 00:40:39 +01001136/* Receive frame from HCI drivers */
1137int hci_recv_frame(struct sk_buff *skb)
1138{
1139 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1140 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1141 && !test_bit(HCI_INIT, &hdev->flags))) {
1142 kfree_skb(skb);
1143 return -ENXIO;
1144 }
1145
1146 /* Incomming skb */
1147 bt_cb(skb)->incoming = 1;
1148
1149 /* Time stamp */
1150 __net_timestamp(skb);
1151
1152 /* Queue frame for rx task */
1153 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001154 tasklet_schedule(&hdev->rx_task);
1155
Marcel Holtmann76bca882009-11-18 00:40:39 +01001156 return 0;
1157}
1158EXPORT_SYMBOL(hci_recv_frame);
1159
/* Core packet reassembly shared by hci_recv_fragment() and
 * hci_recv_stream_fragment().
 *
 * @type:     HCI packet type of the frame being rebuilt (ACL/SCO/event)
 * @data:     driver-supplied input bytes
 * @count:    number of bytes available at @data
 * @index:    which hdev->reassembly[] slot to use
 * @gfp_mask: allocation flags for a new reassembly skb
 *
 * Consumes input until the current frame completes, then hands the
 * finished skb to hci_recv_frame().  Returns the number of input bytes
 * left unconsumed (start of the next frame) or a negative error code.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame in progress: allocate a worst-case sized skb for
		 * this packet type and expect only the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * stage (first the header, then the payload) */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has accumulated, read the payload
		 * length from it; drop the frame if it would overflow
		 * the preallocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1268
Marcel Holtmannef222012007-07-11 06:42:04 +02001269int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1270{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301271 int rem = 0;
1272
Marcel Holtmannef222012007-07-11 06:42:04 +02001273 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1274 return -EILSEQ;
1275
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001276 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301277 rem = hci_reassembly(hdev, type, data, count,
1278 type - 1, GFP_ATOMIC);
1279 if (rem < 0)
1280 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001281
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301282 data += (count - rem);
1283 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001284 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001285
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301286 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001287}
1288EXPORT_SYMBOL(hci_recv_fragment);
1289
Suraj Sumangala99811512010-07-14 13:02:19 +05301290#define STREAM_REASSEMBLY 0
1291
1292int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1293{
1294 int type;
1295 int rem = 0;
1296
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001297 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301298 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1299
1300 if (!skb) {
1301 struct { char type; } *pkt;
1302
1303 /* Start of the frame */
1304 pkt = data;
1305 type = pkt->type;
1306
1307 data++;
1308 count--;
1309 } else
1310 type = bt_cb(skb)->pkt_type;
1311
1312 rem = hci_reassembly(hdev, type, data,
1313 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1314 if (rem < 0)
1315 return rem;
1316
1317 data += (count - rem);
1318 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001319 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301320
1321 return rem;
1322}
1323EXPORT_SYMBOL(hci_recv_stream_fragment);
1324
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325/* ---- Interface to upper protocols ---- */
1326
1327/* Register/Unregister protocols.
1328 * hci_task_lock is used to ensure that no tasks are running. */
1329int hci_register_proto(struct hci_proto *hp)
1330{
1331 int err = 0;
1332
1333 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1334
1335 if (hp->id >= HCI_MAX_PROTO)
1336 return -EINVAL;
1337
1338 write_lock_bh(&hci_task_lock);
1339
1340 if (!hci_proto[hp->id])
1341 hci_proto[hp->id] = hp;
1342 else
1343 err = -EEXIST;
1344
1345 write_unlock_bh(&hci_task_lock);
1346
1347 return err;
1348}
1349EXPORT_SYMBOL(hci_register_proto);
1350
1351int hci_unregister_proto(struct hci_proto *hp)
1352{
1353 int err = 0;
1354
1355 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1356
1357 if (hp->id >= HCI_MAX_PROTO)
1358 return -EINVAL;
1359
1360 write_lock_bh(&hci_task_lock);
1361
1362 if (hci_proto[hp->id])
1363 hci_proto[hp->id] = NULL;
1364 else
1365 err = -ENOENT;
1366
1367 write_unlock_bh(&hci_task_lock);
1368
1369 return err;
1370}
1371EXPORT_SYMBOL(hci_unregister_proto);
1372
/* Add a callback set to the global HCI callback list. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1384
/* Remove a callback set from the global HCI callback list. Always
 * returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1396
/* Hand a frame to the driver's send callback, mirroring it to HCI
 * sockets first when someone listens in promiscuous mode. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* NOTE(review): NULL presumably means "exclude no socket"
		 * — confirm against hci_send_to_sock() in hci_sock.c */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1420
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the little-endian command header followed by the
	 * parameter block */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Record the last command issued while the controller is
	 * initializing (consumed by the init sequence handling) */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Queue the command and kick the command tasklet */
	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Only hand out parameters if the outstanding command actually
	 * matches the requested opcode */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	/* Parameters start right after the command header */
	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1474
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	/* Prepend the ACL header: handle and flags packed into one
	 * little-endian field, followed by the payload length */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1487
/* Queue ACL data (possibly an skb with a frag_list chain) on the
 * connection and kick the TX tasklet.  Only the head fragment keeps
 * the caller's boundary flags; continuations get ACL_CONT. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1536
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header (handle + payload length) and prepend it */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and kick the TX tasklet */
	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1559
1560/* ---- HCI TX task (outgoing data) ---- */
1561
/* HCI Connection scheduler: pick the connection of the given link type
 * with queued data and the fewest packets in flight, and compute its
 * fair share (*quote) of the controller's free buffers. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	/* NOTE(review): min is a signed int initialized to ~0 (-1); the
	 * comparison with c->sent relies on the usual arithmetic
	 * conversions to behave as UINT_MAX — confirm c->sent's type */
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	/* Split the available controller buffers evenly among the
	 * eligible connections, granting at least one packet */
	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1600
1601static inline void hci_acl_tx_to(struct hci_dev *hdev)
1602{
1603 struct hci_conn_hash *h = &hdev->conn_hash;
1604 struct list_head *p;
1605 struct hci_conn *c;
1606
1607 BT_ERR("%s ACL tx timeout", hdev->name);
1608
1609 /* Kill stalled connections */
1610 list_for_each(p, &h->list) {
1611 c = list_entry(p, struct hci_conn, list);
1612 if (c->type == ACL_LINK && c->sent) {
1613 BT_ERR("%s killing stalled ACL connection %s",
1614 hdev->name, batostr(&c->dst));
1615 hci_acl_disconn(c, 0x13);
1616 }
1617 }
1618}
1619
/* Drain queued ACL data while the controller has free ACL buffers
 * (acl_cnt), distributing credits via hci_low_sent(). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* NOTE(review): presumably exits sniff/park before
			 * transmitting — see hci_conn_enter_active_mode */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per packet */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1649
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Note: unlike ACL scheduling, sco_cnt is not
			 * decremented here; conn->sent just wraps to
			 * avoid overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1670
/* Schedule eSCO: same logic as hci_sched_sco but for ESCO_LINK
 * connections (shares the sco_cnt buffer count). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* sco_cnt is not decremented; conn->sent just
			 * wraps to avoid overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1690
/* TX tasklet: run all link-type schedulers, then flush raw packets.
 * Holds hci_task_lock for read so protocol (un)registration (which
 * takes it for write) is excluded while we run. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1714
1715/* ----- HCI RX task (incoming data proccessing) ----- */
1716
/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field packs the connection handle together
	 * with the packet boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Protocol layer takes ownership of the skb */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1756
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Protocol layer takes ownership of the skb */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1792
/* RX tasklet: dispatch queued frames to listening sockets, the event
 * handler and the upper-protocol data paths.  Holds hci_task_lock for
 * read to exclude protocol (un)registration. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode userspace handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1847
/* Command tasklet: issue one queued command when the controller has a
 * free command credit (cmd_cnt).  A clone of the in-flight command is
 * kept in sent_cmd for hci_sent_cmd_data(). */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* If no credit has come back within a second of the last send,
	 * assume the controller is stuck and restore one credit so
	 * command processing can make progress again */
	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		/* Drop the clone of the previous command (NULL-safe) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}