/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check whether the completed command
	 * matches the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

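/*
 * The request machinery above is synchronous: a request function queues one
 * or more HCI commands, and the event processing code is expected to call
 * hci_req_complete() when the matching command finishes, waking the waiter
 * in __hci_request(). Callers may therefore sleep for up to 'timeout'
 * jiffies with the request lock held.
 */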
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
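	/* (0x7d00 = 32000 baseband slots; at 0.625 ms per slot that is 20 s) */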
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
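/*
 * The inquiry cache is a simple singly linked list hanging off
 * hdev->inq_cache. Entries are keyed by bdaddr and refreshed in place
 * when a device is seen again.
 */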
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long) &ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
	 * copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

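	/* The MTU ioctls pack two __u16 values into dev_opt: the MTU and the
	 * packet count. The pointer arithmetic below assumes a little-endian
	 * layout of the __u32. */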
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}

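/*
 * Auto-off lifecycle: hci_power_on() arms off_timer for AUTO_OFF_TIMEOUT ms
 * while HCI_AUTO_OFF is set. Unless something (e.g. the management
 * interface) cancels it via hci_del_off_timer(), hci_auto_off() fires in
 * timer context and defers the actual shutdown to the power_off work item,
 * since hci_dev_close() may sleep.
 */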
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct list_head *p;

	list_for_each(p, &hdev->link_keys) {
		struct link_key *k;

		k = list_entry(p, struct link_key, list);

		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

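	/* Link key type 0x06 ("Changed Combination Key") carries no
	 * information about the original key type, so keep the type that
	 * was already stored. */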
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
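	/* (the list is kept sorted by id, so the first gap is reused and
	 * 'head' tracks the node to insert after) */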
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

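/*
 * Incremental packet reassembly: one sk_buff per slot is grown as input
 * arrives, with bt_skb_cb->expect tracking how many bytes are still
 * missing. The header is collected first so the payload length can be read
 * from it, then 'expect' is bumped to cover the payload. Returns the number
 * of input bytes left unconsumed, or a negative error.
 */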
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
				int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count,
							type - 1, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

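/*
 * Stream input (e.g. a UART/H4-style transport): when no reassembly is in
 * progress, the first byte of the stream names the packet type, after which
 * hci_reassembly() takes over using the dedicated stream slot.
 */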
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data,
					count, STREAM_REASSEMBLY, GFP_ATOMIC);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

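		/* The first fragment keeps ACL_START in its handle flags; the
		 * remaining fragments are re-marked ACL_CONT so the controller
		 * can reassemble the PDU in order. */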
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
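/* Pick the connection of the given type with the fewest outstanding packets
 * and grant it a quota: the free controller buffers divided evenly among
 * the active connections of that type (at least one). */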
1661static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1662{
1663 struct hci_conn_hash *h = &hdev->conn_hash;
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001664 struct hci_conn *conn = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 int num = 0, min = ~0;
1666 struct list_head *p;
1667
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001668 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07001669 * added and removed with TX task disabled. */
1670 list_for_each(p, &h->list) {
1671 struct hci_conn *c;
1672 c = list_entry(p, struct hci_conn, list);
1673
Marcel Holtmann769be972008-07-14 20:13:49 +02001674 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02001676
1677 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1678 continue;
1679
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 num++;
1681
1682 if (c->sent < min) {
1683 min = c->sent;
1684 conn = c;
1685 }
1686 }
1687
1688 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001689 int cnt, q;
1690
1691 switch (conn->type) {
1692 case ACL_LINK:
1693 cnt = hdev->acl_cnt;
1694 break;
1695 case SCO_LINK:
1696 case ESCO_LINK:
1697 cnt = hdev->sco_cnt;
1698 break;
1699 case LE_LINK:
1700 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1701 break;
1702 default:
1703 cnt = 0;
1704 BT_ERR("Unknown link type");
1705 }
1706
1707 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 *quote = q ? q : 1;
1709 } else
1710 *quote = 0;
1711
1712 BT_DBG("conn %p quote %d", conn, *quote);
1713 return conn;
1714}
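
/*
 * Illustrative sketch (plain C over mock data) of the policy in
 * hci_low_sent() above: among connections of the requested type with
 * queued data, pick the one with the fewest unacknowledged packets,
 * and grant it a quota of cnt / num (at least 1), where cnt is the
 * controller's free buffer count and num the number of competitors.
 */
#include <limits.h>
#include <stdio.h>

struct mock_conn {
	const char *name;
	int queued;	/* frames waiting in data_q */
	int sent;	/* frames in flight, not yet acked */
};

int main(void)
{
	struct mock_conn conns[] = {
		{ "conn A", 3, 5 },
		{ "conn B", 2, 1 },	/* fewest in flight: selected */
		{ "conn C", 0, 0 },	/* empty queue: not a competitor */
	};
	int cnt = 8;			/* free ACL buffers on the chip */
	int num = 0, min = INT_MAX;
	struct mock_conn *best = NULL;

	for (unsigned i = 0; i < sizeof(conns) / sizeof(conns[0]); i++) {
		if (!conns[i].queued)
			continue;
		num++;
		if (conns[i].sent < min) {
			min = conns[i].sent;
			best = &conns[i];
		}
	}

	if (best)			/* prints "conn B gets quote 4" */
		printf("%s gets quote %d\n", best->name,
		       cnt / num ? cnt / num : 1);
	return 0;
}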
1715
Ville Tervobae1f5d2011-02-10 22:38:53 -03001716static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717{
1718 struct hci_conn_hash *h = &hdev->conn_hash;
1719 struct list_head *p;
1720 struct hci_conn *c;
1721
Ville Tervobae1f5d2011-02-10 22:38:53 -03001722 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724 /* Kill stalled connections */
1725 list_for_each(p, &h->list) {
1726 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03001727 if (c->type == type && c->sent) {
1728 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 hdev->name, batostr(&c->dst));
1730 hci_acl_disconn(c, 0x13);
1731 }
1732 }
1733}
1734
1735static inline void hci_sched_acl(struct hci_dev *hdev)
1736{
1737 struct hci_conn *conn;
1738 struct sk_buff *skb;
1739 int quote;
1740
1741 BT_DBG("%s", hdev->name);
1742
1743 if (!test_bit(HCI_RAW, &hdev->flags)) {
1744 /* ACL tx timeout must be longer than maximum
1745 * link supervision timeout (40.9 seconds) */
S.Çağlar Onur82453022008-02-17 23:25:57 -08001746 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03001747 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 }
1749
1750 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1751 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1752 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02001753
1754 hci_conn_enter_active_mode(conn);
1755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 hci_send_frame(skb);
1757 hdev->acl_last_tx = jiffies;
1758
1759 hdev->acl_cnt--;
1760 conn->sent++;
1761 }
1762 }
1763}
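
/*
 * Sketch of the wrap-safe timeout test behind the 45-second stall
 * check above.  The kernel's time_after() relies on a signed-
 * subtraction trick so the comparison survives jiffies overflow; this
 * user-space mock mirrors that trick with an assumed HZ of 100.
 */
#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example */

static int time_after(unsigned long a, unsigned long b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	unsigned long jiffies, acl_last_tx;

	/* Last tx shortly before the tick counter wraps around... */
	acl_last_tx = (unsigned long)-4600;
	/* ...and "now" is 50 * HZ ticks later, past the wrap: jiffies is
	 * numerically tiny while the deadline is still huge, so a naive
	 * '>' comparison would wrongly call the link healthy. */
	jiffies = acl_last_tx + 50 * HZ;

	if (time_after(jiffies, acl_last_tx + HZ * 45))
		printf("ACL link considered stalled\n");
	return 0;
}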
1764
1765/* Schedule SCO */
1766static inline void hci_sched_sco(struct hci_dev *hdev)
1767{
1768 struct hci_conn *conn;
1769 struct sk_buff *skb;
1770 int quote;
1771
1772 BT_DBG("%s", hdev->name);
1773
1774 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1775 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1776 BT_DBG("skb %p len %d", skb, skb->len);
1777 hci_send_frame(skb);
1778
1779 conn->sent++;
1780 if (conn->sent == ~0)
1781 conn->sent = 0;
1782 }
1783 }
1784}
1785
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001786static inline void hci_sched_esco(struct hci_dev *hdev)
1787{
1788 struct hci_conn *conn;
1789 struct sk_buff *skb;
1790 int quote;
1791
1792 BT_DBG("%s", hdev->name);
1793
1794 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1795 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1796 BT_DBG("skb %p len %d", skb, skb->len);
1797 hci_send_frame(skb);
1798
1799 conn->sent++;
1800 if (conn->sent == ~0)
1801 conn->sent = 0;
1802 }
1803 }
1804}
1805
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001806static inline void hci_sched_le(struct hci_dev *hdev)
1807{
1808 struct hci_conn *conn;
1809 struct sk_buff *skb;
1810 int quote, cnt;
1811
1812 BT_DBG("%s", hdev->name);
1813
1814 if (!test_bit(HCI_RAW, &hdev->flags)) {
1815 /* LE tx timeout must be longer than maximum
1816 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d2011-02-10 22:38:53 -03001817 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001818 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03001819 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001820 }
1821
1822 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1823 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1824 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1825 BT_DBG("skb %p len %d", skb, skb->len);
1826
1827 hci_send_frame(skb);
1828 hdev->le_last_tx = jiffies;
1829
1830 cnt--;
1831 conn->sent++;
1832 }
1833 }
1834 if (hdev->le_pkts)
1835 hdev->le_cnt = cnt;
1836 else
1837 hdev->acl_cnt = cnt;
1838}
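
/*
 * Hedged sketch of the LE credit-pool fallback used above: a
 * controller that reports no dedicated LE buffers (le_pkts == 0, per
 * the LE Read Buffer Size response) shares the ACL pool, so the
 * scheduler draws from le_cnt when it exists and from acl_cnt
 * otherwise, then writes the remainder back to whichever pool it
 * borrowed from.  Field names follow the kernel; the numbers are mock.
 */
#include <stdio.h>

struct mock_hdev {
	int le_pkts;	/* dedicated LE buffers reported by controller */
	int le_cnt;	/* free LE credits */
	int acl_cnt;	/* free ACL credits */
};

static void sched_le(struct mock_hdev *hdev, int to_send)
{
	int cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;

	while (cnt && to_send) {	/* "transmit" one frame per credit */
		cnt--;
		to_send--;
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}

int main(void)
{
	struct mock_hdev shared = { .le_pkts = 0, .le_cnt = 0, .acl_cnt = 5 };

	sched_le(&shared, 3);
	printf("acl_cnt after LE tx: %d\n", shared.acl_cnt);	/* 2 */
	return 0;
}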
1839
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840static void hci_tx_task(unsigned long arg)
1841{
1842 struct hci_dev *hdev = (struct hci_dev *) arg;
1843 struct sk_buff *skb;
1844
1845 read_lock(&hci_task_lock);
1846
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001847 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1848 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849
1850	/* Schedule queues and send pending frames to the HCI driver */
1851
1852 hci_sched_acl(hdev);
1853
1854 hci_sched_sco(hdev);
1855
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02001856 hci_sched_esco(hdev);
1857
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001858 hci_sched_le(hdev);
1859
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860	/* Send any queued raw (unknown type) packets */
1861 while ((skb = skb_dequeue(&hdev->raw_q)))
1862 hci_send_frame(skb);
1863
1864 read_unlock(&hci_task_lock);
1865}
1866
1867/* ----- HCI RX task (incoming data processing) ----- */
1868
1869/* ACL data packet */
1870static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1871{
1872 struct hci_acl_hdr *hdr = (void *) skb->data;
1873 struct hci_conn *conn;
1874 __u16 handle, flags;
1875
1876 skb_pull(skb, HCI_ACL_HDR_SIZE);
1877
1878 handle = __le16_to_cpu(hdr->handle);
1879 flags = hci_flags(handle);
1880 handle = hci_handle(handle);
1881
1882 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1883
1884 hdev->stat.acl_rx++;
1885
1886 hci_dev_lock(hdev);
1887 conn = hci_conn_hash_lookup_handle(hdev, handle);
1888 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001889
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 if (conn) {
1891 register struct hci_proto *hp;
1892
Marcel Holtmann04837f62006-07-03 10:02:33 +02001893 hci_conn_enter_active_mode(conn);
1894
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001896 hp = hci_proto[HCI_PROTO_L2CAP];
1897 if (hp && hp->recv_acldata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 hp->recv_acldata(conn, skb, flags);
1899 return;
1900 }
1901 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001902 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 hdev->name, handle);
1904 }
1905
1906 kfree_skb(skb);
1907}
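
/*
 * Sketch of the handle/flags split performed above.  The first 16 bits
 * of an ACL data packet carry a 12-bit connection handle plus 2-bit
 * packet-boundary and 2-bit broadcast flags; the hci_handle() and
 * hci_flags() helpers mask and shift accordingly, which this
 * user-space mock reproduces with the same macro definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define hci_handle(h)	((h) & 0x0fff)
#define hci_flags(h)	((h) >> 12)

int main(void)
{
	/* flags 0x2 (ACL_START) in the top nibble, handle 0x02a below */
	uint16_t wire = 0x202a;

	/* prints "handle 0x02a flags 0x2" */
	printf("handle 0x%03x flags 0x%x\n",
	       hci_handle(wire), hci_flags(wire));
	return 0;
}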
1908
1909/* SCO data packet */
1910static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1911{
1912 struct hci_sco_hdr *hdr = (void *) skb->data;
1913 struct hci_conn *conn;
1914 __u16 handle;
1915
1916 skb_pull(skb, HCI_SCO_HDR_SIZE);
1917
1918 handle = __le16_to_cpu(hdr->handle);
1919
1920 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1921
1922 hdev->stat.sco_rx++;
1923
1924 hci_dev_lock(hdev);
1925 conn = hci_conn_hash_lookup_handle(hdev, handle);
1926 hci_dev_unlock(hdev);
1927
1928 if (conn) {
1929 register struct hci_proto *hp;
1930
1931 /* Send to upper protocol */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001932 hp = hci_proto[HCI_PROTO_SCO];
1933 if (hp && hp->recv_scodata) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 hp->recv_scodata(conn, skb);
1935 return;
1936 }
1937 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001938 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 hdev->name, handle);
1940 }
1941
1942 kfree_skb(skb);
1943}
1944
Marcel Holtmann65164552005-10-28 19:20:48 +02001945static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946{
1947 struct hci_dev *hdev = (struct hci_dev *) arg;
1948 struct sk_buff *skb;
1949
1950 BT_DBG("%s", hdev->name);
1951
1952 read_lock(&hci_task_lock);
1953
1954 while ((skb = skb_dequeue(&hdev->rx_q))) {
1955 if (atomic_read(&hdev->promisc)) {
1956 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001957 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 }
1959
1960 if (test_bit(HCI_RAW, &hdev->flags)) {
1961 kfree_skb(skb);
1962 continue;
1963 }
1964
1965 if (test_bit(HCI_INIT, &hdev->flags)) {
1966			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001967 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 case HCI_ACLDATA_PKT:
1969 case HCI_SCODATA_PKT:
1970 kfree_skb(skb);
1971 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001972 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 }
1974
1975 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001976 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977 case HCI_EVENT_PKT:
1978 hci_event_packet(hdev, skb);
1979 break;
1980
1981 case HCI_ACLDATA_PKT:
1982 BT_DBG("%s ACL data packet", hdev->name);
1983 hci_acldata_packet(hdev, skb);
1984 break;
1985
1986 case HCI_SCODATA_PKT:
1987 BT_DBG("%s SCO data packet", hdev->name);
1988 hci_scodata_packet(hdev, skb);
1989 break;
1990
1991 default:
1992 kfree_skb(skb);
1993 break;
1994 }
1995 }
1996
1997 read_unlock(&hci_task_lock);
1998}
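
/*
 * Minimal sketch of the packet-type dispatch in hci_rx_task() above.
 * The type values are the standard HCI (H:4) packet indicators, which
 * is also what bt_cb(skb)->pkt_type holds in this file; the printf
 * handlers stand in for hci_event_packet() and friends.
 */
#include <stdint.h>
#include <stdio.h>

#define HCI_COMMAND_PKT		0x01
#define HCI_ACLDATA_PKT		0x02
#define HCI_SCODATA_PKT		0x03
#define HCI_EVENT_PKT		0x04

static void dispatch(uint8_t pkt_type)
{
	switch (pkt_type) {
	case HCI_EVENT_PKT:
		printf("event packet -> event handler\n");
		break;
	case HCI_ACLDATA_PKT:
		printf("ACL data packet -> ACL handler\n");
		break;
	case HCI_SCODATA_PKT:
		printf("SCO data packet -> SCO handler\n");
		break;
	default:
		printf("unknown type 0x%02x -> dropped\n", pkt_type);
		break;
	}
}

int main(void)
{
	uint8_t rx_q[] = { HCI_EVENT_PKT, HCI_ACLDATA_PKT, 0x42 };

	for (unsigned i = 0; i < sizeof(rx_q); i++)
		dispatch(rx_q[i]);
	return 0;
}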
1999
2000static void hci_cmd_task(unsigned long arg)
2001{
2002 struct hci_dev *hdev = (struct hci_dev *) arg;
2003 struct sk_buff *skb;
2004
2005 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2006
S.Çağlar Onur82453022008-02-17 23:25:57 -08002007 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 BT_ERR("%s command tx timeout", hdev->name);
2009 atomic_set(&hdev->cmd_cnt, 1);
2010 }
2011
2012 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002013 if (atomic_read(&hdev->cmd_cnt)) {
2014 skb = skb_dequeue(&hdev->cmd_q);
2015 if (!skb)
2016 return;
2017
Wei Yongjun7585b972009-02-25 18:29:52 +08002018 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002020 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2021 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 atomic_dec(&hdev->cmd_cnt);
2023 hci_send_frame(skb);
2024 hdev->cmd_last_tx = jiffies;
2025 } else {
2026 skb_queue_head(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002027 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 }
2029 }
2030}
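
/*
 * Hedged sketch of the command flow control implemented above: cmd_cnt
 * is a credit (typically 0 or 1) that is consumed when a command is
 * handed to the driver and replenished when the controller answers
 * with a Command Complete/Status event.  The one-second watchdog that
 * force-restores a lost credit is modeled with a bare tick counter;
 * queue and event plumbing are reduced to plain integers.
 */
#include <stdio.h>

struct mock_hdev {
	int cmd_cnt;		/* command credits from the controller */
	int queued;		/* commands waiting in cmd_q */
	unsigned long now, cmd_last_tx;
};

static void cmd_task(struct mock_hdev *h)
{
	/* Watchdog: a lost completion event would stall the queue. */
	if (!h->cmd_cnt && h->now > h->cmd_last_tx + 1) {
		printf("command tx timeout, restoring credit\n");
		h->cmd_cnt = 1;
	}

	if (h->cmd_cnt && h->queued) {
		h->queued--;
		h->cmd_cnt--;	/* wait for Command Complete/Status */
		h->cmd_last_tx = h->now;
		printf("command sent, %d still queued\n", h->queued);
	}
}

int main(void)
{
	struct mock_hdev h = { .cmd_cnt = 1, .queued = 2 };

	cmd_task(&h);			/* first command goes out */
	h.now = 1; cmd_task(&h);	/* blocked: credit not returned */
	h.now = 3; cmd_task(&h);	/* watchdog fires, second one goes */
	return 0;
}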