blob: 2f003224d2ea9e4d9727c643f03ad3ec35c17b7f [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020047#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
/* Delay (ms) before an auto-powered-on controller is powered off again
 * if nobody claimed it (see hci_power_on/hci_auto_off). */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet handlers for command, RX and TX processing (defined below). */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Protects the RX/TX/cmd tasklets against protocol (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* ---- HCI notifications ---- */
78
79int hci_register_notifier(struct notifier_block *nb)
80{
Alan Sterne041c682006-03-27 01:16:30 -080081 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070082}
83
/* Remove a previously registered HCI event notifier. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast an HCI device event to all registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
/* Called from event processing when a command completes: if a synchronous
 * request (__hci_request) is pending, record the result and wake the
 * sleeping requester. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
112
/* Abort a pending synchronous request with the given (positive) errno,
 * waking the requester; __hci_request negates it before returning. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
123
124/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands), then sleeps
 * interruptibly for up to @timeout jiffies until hci_req_complete() or
 * hci_req_cancel() updates req_status. Caller must hold the request
 * lock (hdev->req_lock). Returns 0 on success, -EINTR if interrupted
 * by a signal, -ETIMEDOUT on timeout, or a negative error derived from
 * the controller status / cancel reason. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves BEFORE issuing the request so a fast completion
	 * cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);	/* map HCI status to errno */
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
166
/* Serialized front end for __hci_request(): takes the request lock so
 * only one request runs against the device at a time. Returns -ENETDOWN
 * if the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
182
/* Request callback: queue an HCI_Reset command to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
190
/* Request callback: queue the full controller init command sequence.
 * Driver-provided commands (driver_init queue) go out first, then a
 * reset (unless quirked off), the mandatory reads, and optional setup.
 * Runs with HCI_INIT set; completion is tracked via init_last_cmd. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys from the controller; the host
	 * (mgmt) is the authority for keys. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
265
/* Request callback: set inquiry/page scan enable to @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
275
/* Request callback: set authentication enable to @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
285
/* Request callback: set encryption mode to @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
295
/* Request callback: set the default link policy to @opt (LE16 on wire). */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
305
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900306/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307 * Device is held on return. */
308struct hci_dev *hci_dev_get(int index)
309{
310 struct hci_dev *hdev = NULL;
311 struct list_head *p;
312
313 BT_DBG("%d", index);
314
315 if (index < 0)
316 return NULL;
317
318 read_lock(&hci_dev_list_lock);
319 list_for_each(p, &hci_dev_list) {
320 struct hci_dev *d = list_entry(p, struct hci_dev, list);
321 if (d->id == index) {
322 hdev = hci_dev_hold(d);
323 break;
324 }
325 }
326 read_unlock(&hci_dev_list_lock);
327 return hdev;
328}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330/* ---- Inquiry support ---- */
331static void inquiry_cache_flush(struct hci_dev *hdev)
332{
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
335
336 BT_DBG("cache %p", cache);
337
338 cache->list = NULL;
339 while ((e = next)) {
340 next = e->next;
341 kfree(e);
342 }
343}
344
345struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
346{
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
349
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
351
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
354 break;
355 return e;
356}
357
/* Insert or refresh the inquiry-cache entry for data->bdaddr and stamp
 * both the entry and the cache with the current jiffies. Caller holds
 * the device lock; allocation is GFP_ATOMIC (silently skipped on OOM). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the singly linked list. */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
380
381static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
382{
383 struct inquiry_cache *cache = &hdev->inq_cache;
384 struct inquiry_info *info = (struct inquiry_info *) buf;
385 struct inquiry_entry *e;
386 int copied = 0;
387
388 for (e = cache->list; e && copied < num; e = e->next, copied++) {
389 struct inquiry_data *data = &e->data;
390 bacpy(&info->bdaddr, &data->bdaddr);
391 info->pscan_rep_mode = data->pscan_rep_mode;
392 info->pscan_period_mode = data->pscan_period_mode;
393 info->pscan_mode = data->pscan_mode;
394 memcpy(info->dev_class, data->dev_class, 3);
395 info->clock_offset = data->clock_offset;
396 info++;
397 }
398
399 BT_DBG("cache %p, copied %d", cache, copied);
400 return copied;
401}
402
/* Request callback: start an inquiry with the LAP/length/num_rsp taken
 * from the hci_inquiry_req passed via @opt. No-op if an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
419
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale, empty, or a flush was requested), then copy cached
 * results back to userspace after the updated hci_inquiry_req header.
 * Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ir.length is in units of 1.28s on the wire; budget ~2s each. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
485
486/* ---- HCI ioctl helpers ---- */
487
/* Power up the HCI device with index @dev: open the transport and,
 * unless the controller is raw (quirk or non-BR/EDR), run the HCI init
 * request. On init failure everything is torn back down. Returns 0 or
 * a negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init
 * request error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to bring up a radio that rfkill has blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report "powered" to mgmt while still in setup. */
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
567
/* Bring the device down: cancel any pending request, kill the RX/TX
 * tasklets, flush inquiry and connection state, reset the controller
 * (unless raw), drop all queues, then call the driver's ->close().
 * Idempotent: returns 0 immediately if the device is already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
632
633int hci_dev_close(__u16 dev)
634{
635 struct hci_dev *hdev;
636 int err;
637
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200638 hdev = hci_dev_get(dev);
639 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 return -ENODEV;
641 err = hci_dev_do_close(hdev);
642 hci_dev_put(hdev);
643 return err;
644}
645
/* HCIDEVRESET ioctl: flush queues, inquiry cache and connections, then
 * issue an HCI reset (unless raw). TX is disabled for the duration so
 * nothing is sent mid-reset. Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and clear flow-control
	 * packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
686
687int hci_dev_reset_stat(__u16 dev)
688{
689 struct hci_dev *hdev;
690 int ret = 0;
691
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200692 hdev = hci_dev_get(dev);
693 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 return -ENODEV;
695
696 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
697
698 hci_dev_put(hdev);
699
700 return ret;
701}
702
/* Dispatcher for the HCISET* device ioctls: copies the hci_dev_req from
 * userspace and either runs the matching HCI request (auth, encrypt,
 * scan, link policy) or updates host-side settings directly (link mode,
 * packet type, ACL/SCO MTU). Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs MTU in the high 16 bits, packet count in the low. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
777
/* HCIGETDEVLIST ioctl: fill a hci_dev_list_req with (id, flags) pairs
 * for up to dev_num registered devices and copy it to userspace.
 * Touching a device via this ioctl cancels its pending auto-power-off
 * and marks non-mgmt devices pairable (legacy behaviour). */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the reply to the number of entries actually filled. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
827
/* HCIGETDEVINFO ioctl: fill a hci_dev_info snapshot (address, type,
 * flags, MTUs, link policy/mode, stats, features) for one device and
 * copy it to userspace. Also cancels a pending auto-power-off and marks
 * non-mgmt devices pairable, like hci_get_dev_list(). */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
868
869/* ---- Interface to HCI drivers ---- */
870
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200871static int hci_rfkill_set_block(void *data, bool blocked)
872{
873 struct hci_dev *hdev = data;
874
875 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
876
877 if (!blocked)
878 return 0;
879
880 hci_dev_do_close(hdev);
881
882 return 0;
883}
884
/* rfkill operations registered for every HCI device. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
888
/* Alloc HCI device.
 * Returns a zeroed hci_dev with the driver_init queue ready for the
 * driver to fill, or NULL on allocation failure. Freed via
 * hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
903
/* Free HCI device.
 * Purges any driver-queued init commands and drops the embedded device
 * reference; the memory itself is released by the device core's
 * release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
913
/* Workqueue handler: power the device on. If it was auto-powered
 * (HCI_AUTO_OFF), arm the off_timer so it powers back down unless
 * claimed within AUTO_OFF_TIMEOUT ms; announce the index to mgmt once
 * setup completes. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
930
/* Workqueue handler: power the device off (process context, may sleep). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
939
/* off_timer expiry (atomic context): clear the auto-off state and defer
 * the actual close to the power_off work item. */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
950
951void hci_del_off_timer(struct hci_dev *hdev)
952{
953 BT_DBG("%s", hdev->name);
954
955 clear_bit(HCI_AUTO_OFF, &hdev->flags);
956 del_timer(&hdev->off_timer);
957}
958
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200959int hci_uuids_clear(struct hci_dev *hdev)
960{
961 struct list_head *p, *n;
962
963 list_for_each_safe(p, n, &hdev->uuids) {
964 struct bt_uuid *uuid;
965
966 uuid = list_entry(p, struct bt_uuid, list);
967
968 list_del(p);
969 kfree(uuid);
970 }
971
972 return 0;
973}
974
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200975int hci_link_keys_clear(struct hci_dev *hdev)
976{
977 struct list_head *p, *n;
978
979 list_for_each_safe(p, n, &hdev->link_keys) {
980 struct link_key *key;
981
982 key = list_entry(p, struct link_key, list);
983
984 list_del(p);
985 kfree(key);
986 }
987
988 return 0;
989}
990
991struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
992{
993 struct list_head *p;
994
995 list_for_each(p, &hdev->link_keys) {
996 struct link_key *k;
997
998 k = list_entry(p, struct link_key, list);
999
1000 if (bacmp(bdaddr, &k->bdaddr) == 0)
1001 return k;
1002 }
1003
1004 return NULL;
1005}
1006
/* Store (or update) the link key for a remote device.
 *
 * @new_key: non-zero when the key was just created by the controller
 *           (as opposed to being restored), in which case the management
 *           interface is notified.
 * Caller must hold the device lock. Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Reuse the existing entry in place. */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the mgmt notification. */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Link keys are 16 bytes per the HCI specification. */
	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	/* Notify after key->type is set but before the 0x06 rewrite below,
	 * so userspace sees the type reported by the controller. */
	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* Type 0x06 is "changed combination key": keep the original key
	 * type so the security level of the old key is not lost.
	 * NOTE(review): value per Bluetooth Core spec — confirm. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1040
1041int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1042{
1043 struct link_key *key;
1044
1045 key = hci_find_link_key(hdev, bdaddr);
1046 if (!key)
1047 return -ENOENT;
1048
1049 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1050
1051 list_del(&key->list);
1052 kfree(key);
1053
1054 return 0;
1055}
1056
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must supply the mandatory callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id: the list is kept sorted by id,
	 * so stop at the first gap. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Baseline packet types / link policy until the controller's real
	 * capabilities are read during init. */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* One tasklet per direction plus one for the command queue. */
	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	/* Sleeping allocations below must happen outside the list lock. */
	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is optional: registration failure just disables it. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on asynchronously; HCI_AUTO_OFF arms the timer
	 * that powers it back down if nobody claims it. */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1160
/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames still buffered. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced: a device
	 * still in SETUP (or being torn down mid-INIT) never was. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Free per-device state under the device lock. */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the reference taken at registration time. */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1203
/* Suspend HCI device: just notify registered listeners. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1211
/* Resume HCI device: just notify registered listeners. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1219
Marcel Holtmann76bca882009-11-18 00:40:39 +01001220/* Receive frame from HCI drivers */
1221int hci_recv_frame(struct sk_buff *skb)
1222{
1223 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1224 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1225 && !test_bit(HCI_INIT, &hdev->flags))) {
1226 kfree_skb(skb);
1227 return -ENXIO;
1228 }
1229
1230 /* Incomming skb */
1231 bt_cb(skb)->incoming = 1;
1232
1233 /* Time stamp */
1234 __net_timestamp(skb);
1235
1236 /* Queue frame for rx task */
1237 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001238 tasklet_schedule(&hdev->rx_task);
1239
Marcel Holtmann76bca882009-11-18 00:40:39 +01001240 return 0;
1241}
1242EXPORT_SYMBOL(hci_recv_frame);
1243
/* Incrementally reassemble one HCI packet from a byte stream.
 *
 * Partial state is kept in hdev->reassembly[index]. Consumes up to
 * @count bytes from @data; returns the number of bytes NOT consumed
 * (i.e. belonging to the next packet), or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots. */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum frame size for
		 * this type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * stage (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it; bail out if it exceeds the allocated room. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it to the rx path (which takes
			 * ownership of the skb) and report leftover bytes. */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1352
Marcel Holtmannef222012007-07-11 06:42:04 +02001353int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1354{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301355 int rem = 0;
1356
Marcel Holtmannef222012007-07-11 06:42:04 +02001357 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1358 return -EILSEQ;
1359
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001360 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301361 rem = hci_reassembly(hdev, type, data, count,
1362 type - 1, GFP_ATOMIC);
1363 if (rem < 0)
1364 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001365
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301366 data += (count - rem);
1367 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001368 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001369
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301370 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001371}
1372EXPORT_SYMBOL(hci_recv_fragment);
1373
Suraj Sumangala99811512010-07-14 13:02:19 +05301374#define STREAM_REASSEMBLY 0
1375
1376int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1377{
1378 int type;
1379 int rem = 0;
1380
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001381 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301382 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1383
1384 if (!skb) {
1385 struct { char type; } *pkt;
1386
1387 /* Start of the frame */
1388 pkt = data;
1389 type = pkt->type;
1390
1391 data++;
1392 count--;
1393 } else
1394 type = bt_cb(skb)->pkt_type;
1395
1396 rem = hci_reassembly(hdev, type, data,
1397 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1398 if (rem < 0)
1399 return rem;
1400
1401 data += (count - rem);
1402 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001403 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301404
1405 return rem;
1406}
1407EXPORT_SYMBOL(hci_recv_stream_fragment);
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409/* ---- Interface to upper protocols ---- */
1410
1411/* Register/Unregister protocols.
1412 * hci_task_lock is used to ensure that no tasks are running. */
1413int hci_register_proto(struct hci_proto *hp)
1414{
1415 int err = 0;
1416
1417 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1418
1419 if (hp->id >= HCI_MAX_PROTO)
1420 return -EINVAL;
1421
1422 write_lock_bh(&hci_task_lock);
1423
1424 if (!hci_proto[hp->id])
1425 hci_proto[hp->id] = hp;
1426 else
1427 err = -EEXIST;
1428
1429 write_unlock_bh(&hci_task_lock);
1430
1431 return err;
1432}
1433EXPORT_SYMBOL(hci_register_proto);
1434
1435int hci_unregister_proto(struct hci_proto *hp)
1436{
1437 int err = 0;
1438
1439 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1440
1441 if (hp->id >= HCI_MAX_PROTO)
1442 return -EINVAL;
1443
1444 write_lock_bh(&hci_task_lock);
1445
1446 if (hci_proto[hp->id])
1447 hci_proto[hp->id] = NULL;
1448 else
1449 err = -ENOENT;
1450
1451 write_unlock_bh(&hci_task_lock);
1452
1453 return err;
1454}
1455EXPORT_SYMBOL(hci_unregister_proto);
1456
/* Add a callback structure to the global HCI callback list. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1468
/* Remove a callback structure from the global HCI callback list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1480
/* Hand one fully-built frame to the driver's send callback.
 * Takes ownership of the skb (driver frees or queues it). */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* With monitors attached, also deliver a copy to raw sockets. */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1504
/* Send HCI command */
/* Build an HCI command packet (header + optional parameters) and queue
 * it on cmd_q; the cmd tasklet performs the actual transmission.
 * Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Opcode goes on the wire little-endian. */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command so the init sequence can
	 * be resumed from the matching command-complete event. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540
1541/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001542void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543{
1544 struct hci_command_hdr *hdr;
1545
1546 if (!hdev->sent_cmd)
1547 return NULL;
1548
1549 hdr = (void *) hdev->sent_cmd->data;
1550
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001551 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 return NULL;
1553
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001554 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555
1556 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1557}
1558
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to the skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and packet-boundary flags share one little-endian field. */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1571
/* Queue ACL data (possibly pre-fragmented via frag_list) on a connection
 * and kick the tx tasklet. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Only the first fragment carries ACL_START; continuations
		 * are marked ACL_CONT. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1620
/* Send SCO data */
/* Prepend a SCO header and queue the skb on the connection; the tx
 * tasklet performs the transmission. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1643
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of the given link type with queued data and the
 * fewest in-flight packets; *quote gets its fair share of the available
 * controller buffers. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	/* min = ~0 acts as "infinity" for the c->sent comparison below. */
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		/* Split the free buffer count evenly among the eligible
		 * connections; every winner gets at least one slot. */
		int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
		int q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1684
/* ACL transmit timeout: the controller stopped returning buffer credits,
 * so disconnect every connection with unacknowledged packets. */
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s ACL tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == ACL_LINK && c->sent) {
			BT_ERR("%s killing stalled ACL connection %s",
					hdev->name, batostr(&c->dst));
			/* 0x13: "remote user terminated connection" reason.
			 * NOTE(review): value per HCI error code table. */
			hci_acl_disconn(c, 0x13);
		}
	}
}
1703
/* Drain ACL queues while the controller has free ACL buffers
 * (hdev->acl_cnt), fairly across connections via hci_low_sent(). */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting. */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer used; credited back on
			 * Number Of Completed Packets. */
			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1733
/* Schedule SCO */
/* Drain SCO queues while the controller has free SCO buffers. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* SCO has no completed-packets feedback; just wrap
			 * the per-connection counter. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1754
/* Drain eSCO queues; eSCO shares the SCO buffer count (sco_cnt). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Same wrap-around bookkeeping as plain SCO. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1774
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775static void hci_tx_task(unsigned long arg)
1776{
1777 struct hci_dev *hdev = (struct hci_dev *) arg;
1778 struct sk_buff *skb;
1779
1780 read_lock(&hci_task_lock);
1781
1782 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1783
1784 /* Schedule queues and send stuff to HCI driver */
1785
1786 hci_sched_acl(hdev);
1787
1788 hci_sched_sco(hdev);
1789
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001790 hci_sched_esco(hdev);
1791
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 /* Send next queued raw (unknown type) packet */
1793 while ((skb = skb_dequeue(&hdev->raw_q)))
1794 hci_send_frame(skb);
1795
1796 read_unlock(&hci_task_lock);
1797}
1798
1799/* ----- HCI RX task (incoming data proccessing) ----- */
1800
1801/* ACL data packet */
1802static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1803{
1804 struct hci_acl_hdr *hdr = (void *) skb->data;
1805 struct hci_conn *conn;
1806 __u16 handle, flags;
1807
1808 skb_pull(skb, HCI_ACL_HDR_SIZE);
1809
1810 handle = __le16_to_cpu(hdr->handle);
1811 flags = hci_flags(handle);
1812 handle = hci_handle(handle);
1813
1814 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1815
1816 hdev->stat.acl_rx++;
1817
1818 hci_dev_lock(hdev);
1819 conn = hci_conn_hash_lookup_handle(hdev, handle);
1820 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001821
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822 if (conn) {
1823 register struct hci_proto *hp;
1824
Marcel Holtmann04837f62006-07-03 10:02:33 +02001825 hci_conn_enter_active_mode(conn);
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001828 hp = hci_proto[HCI_PROTO_L2CAP];
1829 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830 hp->recv_acldata(conn, skb, flags);
1831 return;
1832 }
1833 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001834 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835 hdev->name, handle);
1836 }
1837
1838 kfree_skb(skb);
1839}
1840
/* SCO data packet */
/* Deliver an incoming SCO packet to the SCO protocol; frees the skb if
 * no owner protocol or connection is found. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Protocol takes ownership of the skb. */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1876
/* RX tasklet: drain rx_q, mirror to raw sockets when monitored, and
 * dispatch each packet by type. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode userspace owns the protocol; drop here. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
1931
/* CMD tasklet: send the next queued command when the controller has a
 * free command credit (cmd_cnt); also recovers from command timeouts. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* No command-complete/status within a second: assume the credit
	 * was lost and restore it so commands can flow again. */
	if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
		BT_ERR("%s command tx timeout", hdev->name);
		atomic_set(&hdev->cmd_cnt, 1);
	}

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd before cloning the new one. */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so hci_sent_cmd_data() can inspect the
		 * parameters when the completion event arrives. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			hdev->cmd_last_tx = jiffies;
		} else {
			/* Clone failed: requeue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}