blob: 92960532dea48d3435058c73de19848bcf019e90 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <net/sock.h>
45
46#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020047#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <asm/unaligned.h>
49
50#include <net/bluetooth/bluetooth.h>
51#include <net/bluetooth/hci_core.h>
52
Johan Hedbergab81cbf2010-12-15 13:53:18 +020053#define AUTO_OFF_TIMEOUT 2000
54
Linus Torvalds1da177e2005-04-16 15:20:36 -070055static void hci_cmd_task(unsigned long arg);
56static void hci_rx_task(unsigned long arg);
57static void hci_tx_task(unsigned long arg);
58static void hci_notify(struct hci_dev *hdev, int event);
59
60static DEFINE_RWLOCK(hci_task_lock);
61
62/* HCI device list */
63LIST_HEAD(hci_dev_list);
64DEFINE_RWLOCK(hci_dev_list_lock);
65
66/* HCI callback list */
67LIST_HEAD(hci_cb_list);
68DEFINE_RWLOCK(hci_cb_list_lock);
69
70/* HCI protocols */
71#define HCI_MAX_PROTO 2
72struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080075static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
77/* ---- HCI notifications ---- */
78
/* Register a callback on the HCI notifier chain (device add/remove,
 * up/down events delivered via hci_notify()). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove a previously registered callback from the HCI notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast @event for @hdev to all registered HCI notifier callbacks. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94/* ---- HCI requests ---- */
95
/* Command-complete hook: wake the synchronous __hci_request() waiter,
 * recording @result as the request outcome. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
112
/* Abort a pending synchronous request with @err (positive errno value,
 * negated by __hci_request() on return). Used e.g. on device close. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
123
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps interruptibly until
 * hci_req_complete()/hci_req_cancel() wakes us, a signal arrives, or
 * @timeout (in jiffies) expires. Caller must hold the request lock.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE issuing the request so a fast
	 * completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status == HCI_REQ_PEND,
	 * so a late completion may still fire — confirm callers always
	 * serialize via hci_req_lock(). */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the HCI status code to a negative errno */
		err = -bt_err(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
166
/* Serialized wrapper around __hci_request(); fails fast with -ENETDOWN
 * if the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
182
/* Request handler: issue an HCI_OP_RESET to the controller. @opt unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
190
/* Controller bring-up sequence, run under HCI_INIT via __hci_request():
 * flush driver-provided early commands to the command queue, then issue
 * the mandatory and optional HCI initialization commands in order. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all link keys stored on the controller; BDADDR_ANY plus
	 * delete_all selects every key. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
265
/* LE-specific init sequence, run after hci_init_req() on LE-capable
 * controllers. @opt unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
273
/* Request handler: write the inquiry/page scan enable bits given in @opt. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
283
/* Request handler: set the authentication-enable mode given in @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
293
/* Request handler: set the encryption mode given in @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
303
/* Request handler: set the default link policy (little-endian on the
 * wire) given in @opt. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
313
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900314/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700315 * Device is held on return. */
316struct hci_dev *hci_dev_get(int index)
317{
318 struct hci_dev *hdev = NULL;
319 struct list_head *p;
320
321 BT_DBG("%d", index);
322
323 if (index < 0)
324 return NULL;
325
326 read_lock(&hci_dev_list_lock);
327 list_for_each(p, &hci_dev_list) {
328 struct hci_dev *d = list_entry(p, struct hci_dev, list);
329 if (d->id == index) {
330 hdev = hci_dev_hold(d);
331 break;
332 }
333 }
334 read_unlock(&hci_dev_list_lock);
335 return hdev;
336}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337
338/* ---- Inquiry support ---- */
339static void inquiry_cache_flush(struct hci_dev *hdev)
340{
341 struct inquiry_cache *cache = &hdev->inq_cache;
342 struct inquiry_entry *next = cache->list, *e;
343
344 BT_DBG("cache %p", cache);
345
346 cache->list = NULL;
347 while ((e = next)) {
348 next = e->next;
349 kfree(e);
350 }
351}
352
353struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
354{
355 struct inquiry_cache *cache = &hdev->inq_cache;
356 struct inquiry_entry *e;
357
358 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
359
360 for (e = cache->list; e; e = e->next)
361 if (!bacmp(&e->data.bdaddr, bdaddr))
362 break;
363 return e;
364}
365
/* Insert or refresh an inquiry result in the per-device cache and stamp
 * both the entry and the cache with the current jiffies. GFP_ATOMIC:
 * may run in non-sleeping (event processing) context. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the singly-linked list */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
388
/* Serialize up to @num cached entries into @buf as an array of
 * struct inquiry_info; returns the count written. Must not sleep
 * (callers hold the device lock). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
410
/* Request handler: start an inquiry with the parameters passed in @opt
 * (a struct hci_inquiry_req *), unless one is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
427
/* HCIINQUIRY ioctl backend: run a new inquiry if the cache is stale,
 * empty, or a flush was requested, then copy the (possibly cached)
 * results back to user space. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	/* ~2s of wall time per requested inquiry-length unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) *max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the result array */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
493
494/* ---- HCI ioctl helpers ---- */
495
/* Bring an HCI device up: open the transport, run the HCI init sequence
 * (skipped for raw devices), and on failure roll back completely.
 * Returns 0 on success or a negative errno (-ERFKILL, -EALREADY, -EIO,
 * or an init-sequence error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		//__hci_request(hdev, hci_reset_req, 0, HZ);
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): the LE init result overwrites ret, so a
		 * BR/EDR init failure can be masked by a successful LE
		 * init (and vice versa) — confirm this is intended. */
		if (lmp_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Reference dropped again in hci_dev_do_close() */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
579
/* Common teardown path for closing a device: cancel pending requests,
 * stop tasklets, flush caches and queues, reset the controller, close
 * the transport, and drop the reference taken at open time. The exact
 * ordering here matters (tasks must be dead before queues are purged). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Balances the hci_dev_hold() done in hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
644
645int hci_dev_close(__u16 dev)
646{
647 struct hci_dev *hdev;
648 int err;
649
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200650 hdev = hci_dev_get(dev);
651 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652 return -ENODEV;
653 err = hci_dev_do_close(hdev);
654 hci_dev_put(hdev);
655 return err;
656}
657
/* HCIDEVRESET ioctl backend: flush queues and caches and issue an HCI
 * reset while the device stays up. TX is disabled for the duration so
 * no packets race with the flush. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters back to their post-init state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
698
699int hci_dev_reset_stat(__u16 dev)
700{
701 struct hci_dev *hdev;
702 int ret = 0;
703
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200704 hdev = hci_dev_get(dev);
705 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706 return -ENODEV;
707
708 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
709
710 hci_dev_put(hdev);
711
712 return ret;
713}
714
/* Backend for the HCISET* ioctls: run the matching HCI request or
 * update device parameters directly from the user-supplied
 * struct hci_dev_req. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: the first
	 * holds the packet count, the second the MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
789
/* HCIGETDEVLIST ioctl backend: return id/flags for up to dev_num
 * registered devices. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the temporary allocation to two pages' worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* A legacy ioctl user is taking over: cancel the pending
		 * auto power-off and, while mgmt is not in control, keep
		 * the device pairable. */
		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Report only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
839
/* HCIGETDEVINFO ioctl backend: copy a snapshot of one device's
 * parameters, features and statistics to user space. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access: cancel auto power-off and keep the device
	 * pairable while mgmt is not in control (same as dev_list). */
	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
880
881/* ---- Interface to HCI drivers ---- */
882
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200883static int hci_rfkill_set_block(void *data, bool blocked)
884{
885 struct hci_dev *hdev = data;
886
887 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
888
889 if (!blocked)
890 return 0;
891
892 hci_dev_do_close(hdev);
893
894 return 0;
895}
896
/* rfkill integration: only the block transition is acted upon. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
900
/* Alloc HCI device.
 * Returns a zeroed struct hci_dev with the driver_init queue ready for
 * use, or NULL on allocation failure. Freed via hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
915
/* Free HCI device.
 * Purges any unsent driver-init commands; the hci_dev itself is released
 * through the embedded device's release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
925
/* Deferred power-on work item: open the device; if it came up via
 * auto-power-on, arm the auto-off timer; and announce a freshly
 * set-up controller to the mgmt interface. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}
942
943static void hci_power_off(struct work_struct *work)
944{
945 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
946
947 BT_DBG("%s", hdev->name);
948
949 hci_dev_close(hdev->id);
950}
951
/* Auto power-off timer callback (runs in timer context).  Clears the
 * auto-off flag and defers the actual close to the workqueue —
 * presumably because hci_dev_close() may sleep; confirm against its
 * implementation.
 */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
962
/* Cancel a pending auto power-off: disarm HCI_AUTO_OFF and delete the
 * timer that hci_power_on() started.
 */
void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
970
Johan Hedberg2aeb9a12011-01-04 12:08:51 +0200971int hci_uuids_clear(struct hci_dev *hdev)
972{
973 struct list_head *p, *n;
974
975 list_for_each_safe(p, n, &hdev->uuids) {
976 struct bt_uuid *uuid;
977
978 uuid = list_entry(p, struct bt_uuid, list);
979
980 list_del(p);
981 kfree(uuid);
982 }
983
984 return 0;
985}
986
Johan Hedberg55ed8ca2011-01-17 14:41:05 +0200987int hci_link_keys_clear(struct hci_dev *hdev)
988{
989 struct list_head *p, *n;
990
991 list_for_each_safe(p, n, &hdev->link_keys) {
992 struct link_key *key;
993
994 key = list_entry(p, struct link_key, list);
995
996 list_del(p);
997 kfree(key);
998 }
999
1000 return 0;
1001}
1002
1003struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004{
1005 struct list_head *p;
1006
1007 list_for_each(p, &hdev->link_keys) {
1008 struct link_key *k;
1009
1010 k = list_entry(p, struct link_key, list);
1011
1012 if (bacmp(bdaddr, &k->bdaddr) == 0)
1013 return k;
1014 }
1015
1016 return NULL;
1017}
1018
/* Store (or update in place) the link key for @bdaddr.
 *
 * @new_key: non-zero when the key came in fresh from the controller, in
 *           which case the management interface is notified.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
						u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the mgmt notification */
		old_key_type = 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);	/* link keys are always 16 bytes */
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* 0x06 is the "Changed Combination Key" HCI key type — presumably
	 * kept under the original key's type so the stored entry still
	 * reflects how the pairing was established; confirm against the
	 * Bluetooth Core spec before relying on this. */
	if (type == 0x06)
		key->type = old_key_type;

	return 0;
}
1052
1053int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1054{
1055 struct link_key *key;
1056
1057 key = hci_find_link_key(hdev, bdaddr);
1058 if (!key)
1059 return -ENOENT;
1060
1061 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1062
1063 list_del(&key->list);
1064 kfree(key);
1065
1066 return 0;
1067}
1068
/* Register HCI device
 *
 * Allocates the lowest free hciX id, initializes all per-device state
 * (tasklets, queues, lists, work items, timers), creates the per-device
 * workqueue and sysfs/rfkill entries, then schedules the deferred
 * power-on.  Returns the assigned id, -EINVAL for an incomplete driver,
 * or -ENOMEM if the workqueue cannot be created.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* Drivers must provide the minimal callback set. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so the first gap (or the end) is the slot to use. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	/* Controller defaults until the init sequence reads real values */
	hdev->flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill is best-effort: registration failure leaves hdev->rfkill
	 * NULL and the device still usable. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Bring the device up asynchronously; see hci_power_on(). */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above under the same lock. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1172
/* Unregister HCI device
 *
 * Removes the device from the global list, shuts it down, frees pending
 * reassembly buffers, tears down rfkill/sysfs/workqueue, clears stored
 * per-device data under the hdev lock, and finally drops the reference
 * taken at registration.  Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() tolerates NULL slots. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal if the index was ever announced as added
	 * (i.e. setup completed and we are not mid-initialization). */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1215
/* Suspend HCI device
 *
 * Broadcasts HCI_DEV_SUSPEND via hci_notify(); no driver or queue state
 * is touched here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1223
/* Resume HCI device
 *
 * Broadcasts HCI_DEV_RESUME via hci_notify(); the counterpart of
 * hci_suspend_dev().  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1231
Marcel Holtmann76bca882009-11-18 00:40:39 +01001232/* Receive frame from HCI drivers */
1233int hci_recv_frame(struct sk_buff *skb)
1234{
1235 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1236 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1237 && !test_bit(HCI_INIT, &hdev->flags))) {
1238 kfree_skb(skb);
1239 return -ENXIO;
1240 }
1241
1242 /* Incomming skb */
1243 bt_cb(skb)->incoming = 1;
1244
1245 /* Time stamp */
1246 __net_timestamp(skb);
1247
1248 /* Queue frame for rx task */
1249 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001250 tasklet_schedule(&hdev->rx_task);
1251
Marcel Holtmann76bca882009-11-18 00:40:39 +01001252 return 0;
1253}
1254EXPORT_SYMBOL(hci_recv_frame);
1255
/* Incremental packet reassembler shared by the fragment receivers.
 *
 * Accumulates up to @count bytes of @data into hdev->reassembly[@index].
 * A partial packet stays parked in the slot between calls; a completed
 * packet is handed to hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (bytes belonging to the
 * next packet), or a negative error (-EILSEQ for bad type/index,
 * -ENOMEM on allocation failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index, gfp_t gfp_mask)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the worst case
		 * of this packet type and expect its header first.  The
		 * type-range check above guarantees one of these cases. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, gfp_mask);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than the bytes still expected. */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1364
Marcel Holtmannef222012007-07-11 06:42:04 +02001365int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1366{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301367 int rem = 0;
1368
Marcel Holtmannef222012007-07-11 06:42:04 +02001369 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1370 return -EILSEQ;
1371
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001372 while (count) {
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301373 rem = hci_reassembly(hdev, type, data, count,
1374 type - 1, GFP_ATOMIC);
1375 if (rem < 0)
1376 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001377
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301378 data += (count - rem);
1379 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001380 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001381
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301382 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001383}
1384EXPORT_SYMBOL(hci_recv_fragment);
1385
Suraj Sumangala99811512010-07-14 13:02:19 +05301386#define STREAM_REASSEMBLY 0
1387
1388int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1389{
1390 int type;
1391 int rem = 0;
1392
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001393 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301394 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1395
1396 if (!skb) {
1397 struct { char type; } *pkt;
1398
1399 /* Start of the frame */
1400 pkt = data;
1401 type = pkt->type;
1402
1403 data++;
1404 count--;
1405 } else
1406 type = bt_cb(skb)->pkt_type;
1407
1408 rem = hci_reassembly(hdev, type, data,
1409 count, STREAM_REASSEMBLY, GFP_ATOMIC);
1410 if (rem < 0)
1411 return rem;
1412
1413 data += (count - rem);
1414 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001415 };
Suraj Sumangala99811512010-07-14 13:02:19 +05301416
1417 return rem;
1418}
1419EXPORT_SYMBOL(hci_recv_stream_fragment);
1420
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421/* ---- Interface to upper protocols ---- */
1422
1423/* Register/Unregister protocols.
1424 * hci_task_lock is used to ensure that no tasks are running. */
1425int hci_register_proto(struct hci_proto *hp)
1426{
1427 int err = 0;
1428
1429 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1430
1431 if (hp->id >= HCI_MAX_PROTO)
1432 return -EINVAL;
1433
1434 write_lock_bh(&hci_task_lock);
1435
1436 if (!hci_proto[hp->id])
1437 hci_proto[hp->id] = hp;
1438 else
1439 err = -EEXIST;
1440
1441 write_unlock_bh(&hci_task_lock);
1442
1443 return err;
1444}
1445EXPORT_SYMBOL(hci_register_proto);
1446
1447int hci_unregister_proto(struct hci_proto *hp)
1448{
1449 int err = 0;
1450
1451 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1452
1453 if (hp->id >= HCI_MAX_PROTO)
1454 return -EINVAL;
1455
1456 write_lock_bh(&hci_task_lock);
1457
1458 if (hci_proto[hp->id])
1459 hci_proto[hp->id] = NULL;
1460 else
1461 err = -ENOENT;
1462
1463 write_unlock_bh(&hci_task_lock);
1464
1465 return err;
1466}
1467EXPORT_SYMBOL(hci_unregister_proto);
1468
/* Add a callback set to the global HCI callback list.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1480
/* Remove a callback set from the global HCI callback list.
 * Always returns 0; the entry is assumed to have been registered.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1492
/* Hand a fully-built frame to the driver's send callback.
 *
 * Takes ownership of @skb.  When promiscuous monitors are attached, a
 * copy is delivered to the HCI sockets first.  Returns the driver's
 * result, or -ENODEV if the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1516
1517/* Send HCI command */
/* Build an HCI command packet and queue it for the command tasklet.
 *
 * @opcode: 16-bit HCI opcode (OGF/OCF packed)
 * @plen:   parameter length; @param is copied when non-zero
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	/* NOTE(review): plen is __u32 but the wire header field is one
	 * byte — callers appear expected to keep plen <= 255; confirm. */
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during the init sequence. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552
1553/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001554void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555{
1556 struct hci_command_hdr *hdr;
1557
1558 if (!hdev->sent_cmd)
1559 return NULL;
1560
1561 hdr = (void *) hdev->sent_cmd->data;
1562
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001563 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001564 return NULL;
1565
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001566 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
1568 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1569}
1570
/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, little-endian payload
 * length) to @skb and point the transport header at it. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push below */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1583
/* Queue an ACL data skb (possibly carrying fragments in its frag_list)
 * on the connection and kick the TX tasklet.
 *
 * The head skb keeps the caller's @flags; every fragment is re-flagged
 * as a continuation (ACL_CONT) and all pieces are queued atomically so
 * the scheduler never interleaves another packet between them.
 */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragments; each becomes its own queued skb. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Fragments after the first are continuations. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1632
/* Send SCO data */
/* Build the SCO header on the stack, prepend it to @skb, queue the
 * packet on the connection, and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;	/* SCO length field is a single byte */

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1655
1656/* ---- HCI TX task (outgoing data) ---- */
1657
1658/* HCI Connection scheduler */
/* HCI Connection scheduler: pick the connection of @type with queued
 * data and the fewest in-flight packets, and compute its fair-share
 * quota (*quote) from the controller's free buffer count for that link
 * type.  *quote is 0 when nothing is eligible.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	/* NOTE(review): min is int ~0 (-1); the c->sent < min compare only
	 * works as "start at maximum" if c->sent is unsigned — confirm the
	 * type of hci_conn.sent. */
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Free controller buffers for the chosen link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a separate LE buffer pool
			 * (le_mtu == 0) share the ACL pool. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share across eligible connections, at least 1. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1713
1714static inline void hci_acl_tx_to(struct hci_dev *hdev)
1715{
1716 struct hci_conn_hash *h = &hdev->conn_hash;
1717 struct list_head *p;
1718 struct hci_conn *c;
1719
1720 BT_ERR("%s ACL tx timeout", hdev->name);
1721
1722 /* Kill stalled connections */
1723 list_for_each(p, &h->list) {
1724 c = list_entry(p, struct hci_conn, list);
1725 if (c->type == ACL_LINK && c->sent) {
1726 BT_ERR("%s killing stalled ACL connection %s",
1727 hdev->name, batostr(&c->dst));
1728 hci_acl_disconn(c, 0x13);
1729 }
1730 }
1731}
1732
/* Drain queued ACL data: for each scheduler-selected connection, send
 * up to its quota of frames while controller ACL buffers remain.
 * Also runs the stall watchdog (skipped for raw-mode devices where the
 * stack does not track buffer credits).
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting. */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1762
/* Schedule SCO */
/* Drain queued SCO data per connection quota while SCO buffers remain.
 * conn->sent wraps back to 0 at ~0 — SCO completions are not reported
 * per-packet the way ACL ones are, so the counter is only kept bounded.
 */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1783
/* Drain queued eSCO data; identical to hci_sched_sco() but selects
 * ESCO_LINK connections (sharing the same sco_cnt buffer pool). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
1803
/* Drain queued LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow from the ACL pool, so the consumed credit count
 * is written back to whichever pool was used.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): this reuses hci_acl_tx_to(), which only
		 * kills ACL_LINK connections — stalled LE links are not
		 * disconnected here; confirm whether that is intended. */
		if (!hdev->le_cnt &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_acl_tx_to(hdev);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1837
/* TX tasklet: run every link-type scheduler, then flush raw-queue
 * packets straight to the driver.  hci_task_lock (read side) keeps
 * protocol registration from racing with the running task.
 */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
1864
/* ----- HCI RX task (incoming data processing) ----- */
1866
/* ACL data packet */
/* Deliver an incoming ACL data packet to the L2CAP layer, after
 * resolving the connection from the header's handle.  Consumes @skb
 * (L2CAP takes ownership on the delivery path; otherwise it is freed).
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the connection handle and PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1906
/* SCO data packet */
/* Deliver an incoming SCO data packet to the SCO layer, after resolving
 * the connection from the header's handle.  Consumes @skb (the SCO
 * layer takes ownership on delivery; otherwise it is freed).
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
1942
Marcel Holtmann65164552005-10-28 19:20:48 +02001943static void hci_rx_task(unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 struct hci_dev *hdev = (struct hci_dev *) arg;
1946 struct sk_buff *skb;
1947
1948 BT_DBG("%s", hdev->name);
1949
1950 read_lock(&hci_task_lock);
1951
1952 while ((skb = skb_dequeue(&hdev->rx_q))) {
1953 if (atomic_read(&hdev->promisc)) {
1954 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001955 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 }
1957
1958 if (test_bit(HCI_RAW, &hdev->flags)) {
1959 kfree_skb(skb);
1960 continue;
1961 }
1962
1963 if (test_bit(HCI_INIT, &hdev->flags)) {
1964 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001965 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966 case HCI_ACLDATA_PKT:
1967 case HCI_SCODATA_PKT:
1968 kfree_skb(skb);
1969 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001970 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 }
1972
1973 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001974 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 case HCI_EVENT_PKT:
1976 hci_event_packet(hdev, skb);
1977 break;
1978
1979 case HCI_ACLDATA_PKT:
1980 BT_DBG("%s ACL data packet", hdev->name);
1981 hci_acldata_packet(hdev, skb);
1982 break;
1983
1984 case HCI_SCODATA_PKT:
1985 BT_DBG("%s SCO data packet", hdev->name);
1986 hci_scodata_packet(hdev, skb);
1987 break;
1988
1989 default:
1990 kfree_skb(skb);
1991 break;
1992 }
1993 }
1994
1995 read_unlock(&hci_task_lock);
1996}
1997
1998static void hci_cmd_task(unsigned long arg)
1999{
2000 struct hci_dev *hdev = (struct hci_dev *) arg;
2001 struct sk_buff *skb;
2002
2003 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2004
S.Çağlar Onur82453022008-02-17 23:25:57 -08002005 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 BT_ERR("%s command tx timeout", hdev->name);
2007 atomic_set(&hdev->cmd_cnt, 1);
2008 }
2009
2010 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002011 if (atomic_read(&hdev->cmd_cnt)) {
2012 skb = skb_dequeue(&hdev->cmd_q);
2013 if (!skb)
2014 return;
2015
Wei Yongjun7585b972009-02-25 18:29:52 +08002016 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002018 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2019 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 atomic_dec(&hdev->cmd_cnt);
2021 hci_send_frame(skb);
2022 hdev->cmd_last_tx = jiffies;
2023 } else {
2024 skb_queue_head(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002025 tasklet_schedule(&hdev->cmd_task);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 }
2027 }
2028}