/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

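/*
 * Usage sketch (added commentary, not from the original file): callers
 * drive the controller through a request callback that queues one or
 * more HCI commands; hci_req_complete() then wakes the waiter inside
 * __hci_request() above. The HCISETSCAN ioctl below, for instance,
 * boils down to:
 *
 *	err = hci_request(hdev, hci_scan_req, dr.dev_opt,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */
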
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	if (!(hdev = hci_dev_get(ir.dev_id)))
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

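/*
 * Userspace reaches hci_inquiry() through the HCIINQUIRY ioctl on a raw
 * HCI socket. A minimal caller sketch (illustrative only; the buffer
 * layout mirrors how the result is copied back above, with the request
 * header followed by up to the 255-entry cap of inquiry_info records):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255 } };
 *
 *	ioctl(dd, HCIINQUIRY, &buf);	(dd: an open AF_BLUETOOTH HCI socket)
 */
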
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	if (!(hdev = hci_dev_get(dev)))
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

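/*
 * Added note: the functions above implement the auto-power-off dance.
 * hci_register_dev() queues power_on with HCI_AUTO_OFF set;
 * hci_power_on() then arms off_timer for AUTO_OFF_TIMEOUT ms. Unless
 * something cancels the timer via hci_del_off_timer() (the dev-list
 * and dev-info ioctls do), hci_auto_off() fires and queues power_off,
 * which closes the device again.
 */
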
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

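/*
 * Added note: link key type 0x06 is "changed combination key" in the
 * HCI link-key notification, i.e. an existing key being refreshed.
 * hci_add_link_key() above therefore keeps the old key type in that
 * case so the stored classification of the key is not lost.
 */
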
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

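/*
 * Driver-side sketch (illustrative): a transport that already knows the
 * packet type of each chunk it receives can feed the payload bytes
 * straight in and let the reassembler emit complete frames:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s corrupted event packet", hdev->name);
 */
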
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
						count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

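/*
 * Stream variant sketch (illustrative): UART-style transports that see
 * only a raw byte stream, with the packet-type indicator in-band, hand
 * the whole buffer over and hci_recv_stream_fragment() picks the type
 * from the first byte of each frame:
 *
 *	hci_recv_stream_fragment(hdev, data, count);
 */
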
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

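/*
 * Usage sketch (added commentary): hci_send_cmd() pairs with
 * hci_sent_cmd_data() in the command-complete path, e.g.
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	...
 *	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	(sent now points at the original parameters, or is NULL if the
 *	 last sent command had a different opcode)
 */
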
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

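/*
 * Added note: the quota above is a simple fair share. With, say,
 * acl_cnt == 8 free controller buffers and num == 2 busy ACL
 * connections, the connection with the fewest frames in flight may
 * send q = 8 / 2 = 4 frames (minimum 1) before the scheduler calls
 * hci_low_sent() again.
 */
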
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}

/* Schedule SCO */
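/*
 * Note that sco_cnt only gates entry to the loop and is not
 * decremented per packet; the per-connection sent counter is simply
 * reset once it reaches ~0 so it never overflows.
 */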
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

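/* Schedule eSCO: identical to hci_sched_sco(), but for ESCO_LINK
 * connections, which share the controller's SCO buffer count. */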
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

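/*
 * TX tasklet: runs under the read side of hci_task_lock and services
 * the ACL, SCO and eSCO schedulers in turn, then flushes any queued
 * raw packets straight to the driver.
 */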
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
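/*
 * Strip the ACL header, split its handle field into the connection
 * handle and the packet boundary/broadcast flags, and pass the
 * payload up to L2CAP; frames for unknown handles are dropped.
 */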
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
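/* Same as the ACL path, except that SCO headers carry no flags and
 * the payload is handed to the SCO protocol layer instead. */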
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}

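/*
 * RX tasklet: each queued frame is first copied to any promiscuous
 * sockets, then either dropped (raw mode, or data packets while the
 * device is still initializing) or dispatched by packet type to the
 * event handler or the ACL/SCO receive paths.
 */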
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}

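/*
 * Command tasklet: cmd_cnt holds the controller's command credits.
 * If no credit has come back within a second of the last command,
 * the counter is forced back to one so the queue cannot wedge
 * forever.  A clone of each command is kept in hdev->sent_cmd so
 * that the event handler can refer back to it when the completion
 * arrives.
 */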
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}