blob: 345b70f2e5c3948098c1adb19a85a156ab4f24b1 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Brian Gix3cd62042012-01-11 15:18:17 -08003 Copyright (c) 2000-2001, 2010-2012 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090015 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070018 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090020 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Steve Mucklef132c6c2012-06-06 18:30:57 -070063static bool enable_smp = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* Registered HCI callbacks (security/connection event hooks). */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* AMP Manager event callbacks. */
LIST_HEAD(amp_mgr_cb_list);
DEFINE_RWLOCK(amp_mgr_cb_list_lock);

/* Upper-layer HCI protocols (e.g. L2CAP, SCO); at most two slots. */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* Notifier chain fired on device add/remove and up/down transitions. */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Brian Gix6e4531c2011-10-28 16:12:08 -0700196 memset(&hdev->features, 0, sizeof(hdev->features));
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198}
199
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200200static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
201{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200202 struct hci_cp_delete_stored_link_key cp;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200203 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800204 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200205 __u8 flt_type;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214 skb->dev = (void *) hdev;
215
216 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100217 tasklet_schedule(&hdev->cmd_task);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200218 }
219 skb_queue_purge(&hdev->driver_init);
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 /* Mandatory initialization */
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200222
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300224 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
225 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200227 }
228
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 /* Set default HCI Flow Control Mode */
234 if (hdev->dev_type == HCI_BREDR)
235 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
236 else
237 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
238
239 /* Read HCI Flow Control Mode */
240 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
241
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
246 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248#if 0
249 /* Host buffer size */
250 {
251 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700254 cp.acl_max_pkt = cpu_to_le16(0xffff);
255 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200256 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258#endif
259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 if (hdev->dev_type == HCI_BREDR) {
261 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 /* Read Local Supported Features */
264 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 /* Read BD Address */
267 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 /* Read Class of Device */
270 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 /* Read Local Name */
273 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275 /* Read Voice Setting */
276 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 /* Optional initialization */
279 /* Clear Event Filters */
280 flt_type = HCI_FLT_CLEAR_ALL;
281 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 /* Connection accept timeout ~20 secs */
284 param = cpu_to_le16(0x7d00);
285 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
286
287 bacpy(&cp.bdaddr, BDADDR_ANY);
288 cp.delete_all = 1;
289 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
290 sizeof(cp), &cp);
291 } else {
292 /* AMP initialization */
293 /* Connection accept timeout ~5 secs */
294 param = cpu_to_le16(0x1f40);
295 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
296
297 /* Read AMP Info */
298 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 }
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200300}
301
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300302static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
303{
304 BT_DBG("%s", hdev->name);
305
306 /* Read LE buffer size */
307 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Sunny Kapdi93bef892012-07-30 14:52:56 -0700308
309 /* Read LE clear white list */
310 hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
311
312 /* Read LE white list size */
313 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300314}
315
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
317{
318 __u8 scan = opt;
319
320 BT_DBG("%s %x", hdev->name, scan);
321
322 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200323 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324}
325
326static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
327{
328 __u8 auth = opt;
329
330 BT_DBG("%s %x", hdev->name, auth);
331
332 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200333 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334}
335
336static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
337{
338 __u8 encrypt = opt;
339
340 BT_DBG("%s %x", hdev->name, encrypt);
341
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200342 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200343 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344}
345
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200346static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
347{
348 __le16 policy = cpu_to_le16(opt);
349
Marcel Holtmanna418b892008-11-30 12:17:28 +0100350 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200351
352 /* Default link policy */
353 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
354}
355
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900356/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 * Device is held on return. */
358struct hci_dev *hci_dev_get(int index)
359{
360 struct hci_dev *hdev = NULL;
361 struct list_head *p;
362
363 BT_DBG("%d", index);
364
365 if (index < 0)
366 return NULL;
367
368 read_lock(&hci_dev_list_lock);
369 list_for_each(p, &hci_dev_list) {
370 struct hci_dev *d = list_entry(p, struct hci_dev, list);
371 if (d->id == index) {
372 hdev = hci_dev_hold(d);
373 break;
374 }
375 }
376 read_unlock(&hci_dev_list_lock);
377 return hdev;
378}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380
381/* ---- Inquiry support ---- */
382static void inquiry_cache_flush(struct hci_dev *hdev)
383{
384 struct inquiry_cache *cache = &hdev->inq_cache;
385 struct inquiry_entry *next = cache->list, *e;
386
387 BT_DBG("cache %p", cache);
388
389 cache->list = NULL;
390 while ((e = next)) {
391 next = e->next;
392 kfree(e);
393 }
394}
395
396struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
397{
398 struct inquiry_cache *cache = &hdev->inq_cache;
399 struct inquiry_entry *e;
400
401 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
402
403 for (e = cache->list; e; e = e->next)
404 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200405 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 return e;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200407}
408
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
410{
411 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200412 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413
414 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
415
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
417 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200419 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
420 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 return;
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200422
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200423 ie->next = cache->list;
424 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 }
426
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200427 memcpy(&ie->data, data, sizeof(*data));
428 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 cache->timestamp = jiffies;
430}
431
432static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
433{
434 struct inquiry_cache *cache = &hdev->inq_cache;
435 struct inquiry_info *info = (struct inquiry_info *) buf;
436 struct inquiry_entry *e;
437 int copied = 0;
438
439 for (e = cache->list; e && copied < num; e = e->next, copied++) {
440 struct inquiry_data *data = &e->data;
441 bacpy(&info->bdaddr, &data->bdaddr);
442 info->pscan_rep_mode = data->pscan_rep_mode;
443 info->pscan_period_mode = data->pscan_period_mode;
444 info->pscan_mode = data->pscan_mode;
445 memcpy(info->dev_class, data->dev_class, 3);
446 info->clock_offset = data->clock_offset;
447 info++;
448 }
449
450 BT_DBG("cache %p, copied %d", cache, copied);
451 return copied;
452}
453
454static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
455{
456 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
457 struct hci_cp_inquiry cp;
458
459 BT_DBG("%s", hdev->name);
460
461 if (test_bit(HCI_INQUIRY, &hdev->flags))
462 return;
463
464 /* Start Inquiry */
465 memcpy(&cp.lap, &ir->lap, 3);
466 cp.length = ir->length;
467 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200468 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469}
470
471int hci_inquiry(void __user *arg)
472{
473 __u8 __user *ptr = arg;
474 struct hci_inquiry_req ir;
475 struct hci_dev *hdev;
476 int err = 0, do_inquiry = 0, max_rsp;
477 long timeo;
478 __u8 *buf;
479
480 if (copy_from_user(&ir, ptr, sizeof(ir)))
481 return -EFAULT;
482
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200483 hdev = hci_dev_get(ir.dev_id);
484 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 return -ENODEV;
486
487 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900488 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200489 inquiry_cache_empty(hdev) ||
490 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 inquiry_cache_flush(hdev);
492 do_inquiry = 1;
493 }
494 hci_dev_unlock_bh(hdev);
495
Marcel Holtmann04837f62006-07-03 10:02:33 +0200496 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200497
498 if (do_inquiry) {
499 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
500 if (err < 0)
501 goto done;
502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
504 /* for unlimited number of responses we will use buffer with 255 entries */
505 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
506
507 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
508 * copy it to the user space.
509 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100510 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200511 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 err = -ENOMEM;
513 goto done;
514 }
515
516 hci_dev_lock_bh(hdev);
517 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
518 hci_dev_unlock_bh(hdev);
519
520 BT_DBG("num_rsp %d", ir.num_rsp);
521
522 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
523 ptr += sizeof(ir);
524 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
525 ir.num_rsp))
526 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900527 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528 err = -EFAULT;
529
530 kfree(buf);
531
532done:
533 hci_dev_put(hdev);
534 return err;
535}
536
537/* ---- HCI ioctl helpers ---- */
538
539int hci_dev_open(__u16 dev)
540{
541 struct hci_dev *hdev;
542 int ret = 0;
543
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200544 hdev = hci_dev_get(dev);
545 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 return -ENODEV;
547
548 BT_DBG("%s %p", hdev->name, hdev);
549
550 hci_req_lock(hdev);
551
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200552 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
553 ret = -ERFKILL;
554 goto done;
555 }
556
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 if (test_bit(HCI_UP, &hdev->flags)) {
558 ret = -EALREADY;
559 goto done;
560 }
561
562 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
563 set_bit(HCI_RAW, &hdev->flags);
564
565 if (hdev->open(hdev)) {
566 ret = -EIO;
567 goto done;
568 }
569
Bhasker Netia6e6a4f2012-01-27 15:25:43 +0530570 if (!skb_queue_empty(&hdev->cmd_q)) {
571 BT_ERR("command queue is not empty, purging");
572 skb_queue_purge(&hdev->cmd_q);
573 }
574 if (!skb_queue_empty(&hdev->rx_q)) {
575 BT_ERR("rx queue is not empty, purging");
576 skb_queue_purge(&hdev->rx_q);
577 }
578 if (!skb_queue_empty(&hdev->raw_q)) {
579 BT_ERR("raw queue is not empty, purging");
580 skb_queue_purge(&hdev->raw_q);
581 }
582
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 if (!test_bit(HCI_RAW, &hdev->flags)) {
584 atomic_set(&hdev->cmd_cnt, 1);
585 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200586 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587
Marcel Holtmann04837f62006-07-03 10:02:33 +0200588 ret = __hci_request(hdev, hci_init_req, 0,
589 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700591 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300592 ret = __hci_request(hdev, hci_le_init_req, 0,
593 msecs_to_jiffies(HCI_INIT_TIMEOUT));
594
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 clear_bit(HCI_INIT, &hdev->flags);
596 }
597
598 if (!ret) {
599 hci_dev_hold(hdev);
600 set_bit(HCI_UP, &hdev->flags);
601 hci_notify(hdev, HCI_DEV_UP);
Peter Krystad1fc44072011-08-30 15:38:12 -0700602 if (!test_bit(HCI_SETUP, &hdev->flags) &&
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800603 hdev->dev_type == HCI_BREDR) {
604 hci_dev_lock_bh(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200605 mgmt_powered(hdev->id, 1);
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800606 hci_dev_unlock_bh(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200607 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900608 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 /* Init failed, cleanup */
610 tasklet_kill(&hdev->rx_task);
611 tasklet_kill(&hdev->tx_task);
612 tasklet_kill(&hdev->cmd_task);
613
614 skb_queue_purge(&hdev->cmd_q);
615 skb_queue_purge(&hdev->rx_q);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 if (hdev->sent_cmd) {
621 kfree_skb(hdev->sent_cmd);
622 hdev->sent_cmd = NULL;
623 }
624
625 hdev->close(hdev);
626 hdev->flags = 0;
627 }
628
629done:
630 hci_req_unlock(hdev);
631 hci_dev_put(hdev);
632 return ret;
633}
634
Mat Martineau3b9239a2012-02-16 11:54:30 -0800635static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636{
Mat Martineau4106b992011-11-18 15:26:21 -0800637 unsigned long keepflags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 BT_DBG("%s %p", hdev->name, hdev);
Andre Guedes28b75a82012-02-03 17:48:00 -0300640
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 hci_req_cancel(hdev, ENODEV);
642 hci_req_lock(hdev);
643
644 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300645 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 hci_req_unlock(hdev);
647 return 0;
648 }
649
650 /* Kill RX and TX tasks */
651 tasklet_kill(&hdev->rx_task);
652 tasklet_kill(&hdev->tx_task);
653
654 hci_dev_lock_bh(hdev);
655 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800656 hci_conn_hash_flush(hdev, is_process);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 hci_dev_unlock_bh(hdev);
658
659 hci_notify(hdev, HCI_DEV_DOWN);
660
Bhasker Netiffdff572011-12-21 17:24:01 -0800661 if (hdev->dev_type == HCI_BREDR) {
662 hci_dev_lock_bh(hdev);
663 mgmt_powered(hdev->id, 0);
664 hci_dev_unlock_bh(hdev);
665 }
666
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667 if (hdev->flush)
668 hdev->flush(hdev);
669
670 /* Reset device */
671 skb_queue_purge(&hdev->cmd_q);
672 atomic_set(&hdev->cmd_cnt, 1);
673 if (!test_bit(HCI_RAW, &hdev->flags)) {
674 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200675 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200676 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 clear_bit(HCI_INIT, &hdev->flags);
678 }
679
680 /* Kill cmd task */
681 tasklet_kill(&hdev->cmd_task);
682
683 /* Drop queues */
684 skb_queue_purge(&hdev->rx_q);
685 skb_queue_purge(&hdev->cmd_q);
686 skb_queue_purge(&hdev->raw_q);
687
688 /* Drop last sent command */
689 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300690 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 kfree_skb(hdev->sent_cmd);
692 hdev->sent_cmd = NULL;
693 }
694
695 /* After this point our queues are empty
696 * and no tasks are scheduled. */
697 hdev->close(hdev);
698
Mat Martineau4106b992011-11-18 15:26:21 -0800699 /* Clear only non-persistent flags */
700 if (test_bit(HCI_MGMT, &hdev->flags))
701 set_bit(HCI_MGMT, &keepflags);
702 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
703 set_bit(HCI_LINK_KEYS, &keepflags);
704 if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
705 set_bit(HCI_DEBUG_KEYS, &keepflags);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200706
Mat Martineau4106b992011-11-18 15:26:21 -0800707 hdev->flags = keepflags;
Johan Hedberge59fda82012-02-22 18:11:53 +0200708
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 hci_req_unlock(hdev);
710
711 hci_dev_put(hdev);
712 return 0;
713}
714
715int hci_dev_close(__u16 dev)
716{
717 struct hci_dev *hdev;
718 int err;
719
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200720 hdev = hci_dev_get(dev);
721 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722 return -ENODEV;
Mat Martineau3b9239a2012-02-16 11:54:30 -0800723 err = hci_dev_do_close(hdev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 hci_dev_put(hdev);
725 return err;
726}
727
728int hci_dev_reset(__u16 dev)
729{
730 struct hci_dev *hdev;
731 int ret = 0;
732
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200733 hdev = hci_dev_get(dev);
734 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 return -ENODEV;
736
737 hci_req_lock(hdev);
738 tasklet_disable(&hdev->tx_task);
739
740 if (!test_bit(HCI_UP, &hdev->flags))
741 goto done;
742
743 /* Drop queues */
744 skb_queue_purge(&hdev->rx_q);
745 skb_queue_purge(&hdev->cmd_q);
746
747 hci_dev_lock_bh(hdev);
748 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800749 hci_conn_hash_flush(hdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 hci_dev_unlock_bh(hdev);
751
752 if (hdev->flush)
753 hdev->flush(hdev);
754
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900755 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300756 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
758 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200759 ret = __hci_request(hdev, hci_reset_req, 0,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761
762done:
763 tasklet_enable(&hdev->tx_task);
764 hci_req_unlock(hdev);
765 hci_dev_put(hdev);
766 return ret;
767}
768
769int hci_dev_reset_stat(__u16 dev)
770{
771 struct hci_dev *hdev;
772 int ret = 0;
773
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200774 hdev = hci_dev_get(dev);
775 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776 return -ENODEV;
777
778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
779
780 hci_dev_put(hdev);
781
782 return ret;
783}
784
/* Handle device-configuration ioctls (HCISET*) issued on an HCI socket.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: userspace pointer to a struct hci_dev_req (dev_id + dev_opt)
 *
 * Returns 0 on success or a negative errno (-EFAULT on bad user copy,
 * -ENODEV for an unknown device id, -EINVAL for an unknown command).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	/* Takes a reference on the device; dropped via hci_dev_put() below */
	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only master/accept bits of the requested mode are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit halves: pkts in the first __u16,
		 * MTU in the second — NOTE(review): relies on the host's
		 * in-memory halfword layout, matches historical hciconfig
		 * usage; confirm before touching. */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed-halfword convention as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
859
/* Handle the HCIGETDEVLIST ioctl: report (dev_id, flags) pairs for up
 * to dev_num registered devices back to user space.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req whose first
 *       __u16 is the maximum number of entries the caller can accept.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to roughly two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Enumerating counts as activity: cancel the auto-off timer */
		hci_del_off_timer(hdev);

		/* Devices not managed through mgmt keep legacy pairable mode */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Copy back only the entries that were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
909
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * (address, flags, MTUs, link policy/mode, stats, features) for the
 * device id supplied by user space.
 *
 * Returns 0 on success, -EFAULT on bad user copy, -ENODEV if the id
 * does not name a registered device.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	/* Reference dropped via hci_dev_put() below */
	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: cancel auto power-off */
	hci_del_off_timer(hdev);

	/* Devices not managed through mgmt keep legacy pairable mode */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type (BR/EDR, AMP) */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
950
951/* ---- Interface to HCI drivers ---- */
952
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200953static int hci_rfkill_set_block(void *data, bool blocked)
954{
955 struct hci_dev *hdev = data;
956
957 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
958
959 if (!blocked)
960 return 0;
961
Mat Martineau3b9239a2012-02-16 11:54:30 -0800962 hci_dev_do_close(hdev, 0);
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200963
964 return 0;
965}
966
/* rfkill operations: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971/* Alloc HCI device */
972struct hci_dev *hci_alloc_dev(void)
973{
974 struct hci_dev *hdev;
975
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200976 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 if (!hdev)
978 return NULL;
979
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 skb_queue_head_init(&hdev->driver_init);
981
982 return hdev;
983}
984EXPORT_SYMBOL(hci_alloc_dev);
985
/* Free HCI device: drop queued driver-init frames, then release the
 * last device reference; the struct itself is freed by the device
 * core's release callback, not here. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
995
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200996static void hci_power_on(struct work_struct *work)
997{
998 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Inga Stotland5029fc22011-09-12 15:22:52 -0700999 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001000
1001 BT_DBG("%s", hdev->name);
1002
Inga Stotland5029fc22011-09-12 15:22:52 -07001003 err = hci_dev_open(hdev->id);
1004 if (err && err != -EALREADY)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001005 return;
1006
Peter Krystad1fc44072011-08-30 15:38:12 -07001007 if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
1008 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001009 mod_timer(&hdev->off_timer,
1010 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1011
Peter Krystad1fc44072011-08-30 15:38:12 -07001012 if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
1013 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001014 mgmt_index_added(hdev->id);
1015}
1016
/* Workqueue handler that powers the adapter off (queued by the
 * auto-off timer via hci_auto_off()). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}
1025
/* off_timer expiry callback: clear the auto-off state and defer the
 * actual power-off to the workqueue (timer context cannot sleep). */
static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001036
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001037void hci_del_off_timer(struct hci_dev *hdev)
1038{
1039 BT_DBG("%s", hdev->name);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001040
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001041 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1042 del_timer(&hdev->off_timer);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001043}
1044
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001045int hci_uuids_clear(struct hci_dev *hdev)
1046{
1047 struct list_head *p, *n;
1048
1049 list_for_each_safe(p, n, &hdev->uuids) {
1050 struct bt_uuid *uuid;
1051
1052 uuid = list_entry(p, struct bt_uuid, list);
1053
1054 list_del(p);
1055 kfree(uuid);
1056 }
1057
1058 return 0;
1059}
1060
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001061int hci_link_keys_clear(struct hci_dev *hdev)
1062{
1063 struct list_head *p, *n;
1064
1065 list_for_each_safe(p, n, &hdev->link_keys) {
1066 struct link_key *key;
1067
1068 key = list_entry(p, struct link_key, list);
1069
1070 list_del(p);
1071 kfree(key);
1072 }
1073
1074 return 0;
1075}
1076
1077struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1078{
1079 struct list_head *p;
1080
1081 list_for_each(p, &hdev->link_keys) {
1082 struct link_key *k;
1083
1084 k = list_entry(p, struct link_key, list);
1085
1086 if (bacmp(bdaddr, &k->bdaddr) == 0)
1087 return k;
1088 }
1089
1090 return NULL;
1091}
1092
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001093struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001094{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095 struct list_head *p;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097 list_for_each(p, &hdev->link_keys) {
1098 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001099 struct key_master_id *id;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001101 k = list_entry(p, struct link_key, list);
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001102
Brian Gixcf956772011-10-20 15:18:51 -07001103 if (k->key_type != KEY_TYPE_LTK)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001104 continue;
1105
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001106 if (k->dlen != sizeof(*id))
1107 continue;
1108
1109 id = (void *) &k->data;
1110 if (id->ediv == ediv &&
1111 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1112 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001113 }
1114
1115 return NULL;
1116}
1117EXPORT_SYMBOL(hci_find_ltk);
1118
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001119struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1120 bdaddr_t *bdaddr, u8 type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001121{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001122 struct list_head *p;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 list_for_each(p, &hdev->link_keys) {
1125 struct link_key *k;
1126
1127 k = list_entry(p, struct link_key, list);
1128
Brian Gixcf956772011-10-20 15:18:51 -07001129 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001130 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001131 }
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001132
1133 return NULL;
1134}
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001135EXPORT_SYMBOL(hci_find_link_key_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001136
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1138 u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001139{
1140 struct link_key *key, *old_key;
Brian Gixa68668b2011-08-11 15:49:36 -07001141 struct hci_conn *conn;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301142 u8 old_key_type;
Brian Gixa68668b2011-08-11 15:49:36 -07001143 u8 bonded = 0;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001144
1145 old_key = hci_find_link_key(hdev, bdaddr);
1146 if (old_key) {
Brian Gixcf956772011-10-20 15:18:51 -07001147 old_key_type = old_key->key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001148 key = old_key;
1149 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001150 old_key_type = 0xff;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001151 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1152 if (!key)
1153 return -ENOMEM;
1154 list_add(&key->list, &hdev->link_keys);
1155 }
1156
1157 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1158
1159 bacpy(&key->bdaddr, bdaddr);
1160 memcpy(key->val, val, 16);
Brian Gixa68668b2011-08-11 15:49:36 -07001161 key->auth = 0x01;
Brian Gixcf956772011-10-20 15:18:51 -07001162 key->key_type = type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001163 key->pin_len = pin_len;
1164
Brian Gixa68668b2011-08-11 15:49:36 -07001165 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301166 /* Store the link key persistently if one of the following is true:
1167 * 1. the remote side is using dedicated bonding since in that case
1168 * also the local requirements are set to dedicated bonding
1169 * 2. the local side had dedicated bonding as a requirement
1170 * 3. this is a legacy link key
1171 * 4. this is a changed combination key and there was a previously
1172 * stored one
1173 * If none of the above match only keep the link key around for
1174 * this connection and set the temporary flag for the device.
1175 */
Johan Hedberg4748fed2011-04-28 11:29:02 -07001176
Brian Gixdfdd9362011-08-18 09:58:02 -07001177 if (conn) {
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301178 if ((conn->remote_auth > 0x01) ||
1179 (conn->auth_initiator && conn->auth_type > 0x01) ||
Brian Gixcf956772011-10-20 15:18:51 -07001180 (key->key_type < 0x03) ||
1181 (key->key_type == 0x06 && old_key_type != 0xff))
Brian Gixdfdd9362011-08-18 09:58:02 -07001182 bonded = 1;
1183 }
Johan Hedberg4df378a2011-04-28 11:29:03 -07001184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001185 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001186 mgmt_new_key(hdev->id, key, bonded);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188 if (type == 0x06)
Brian Gixcf956772011-10-20 15:18:51 -07001189 key->key_type = old_key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001190
1191 return 0;
1192}
1193
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001194int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
Brian Gixcf956772011-10-20 15:18:51 -07001195 u8 addr_type, u8 key_size, u8 auth,
1196 __le16 ediv, u8 rand[8], u8 ltk[16])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001197{
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001198 struct link_key *key, *old_key;
1199 struct key_master_id *id;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001200
Brian Gixcf956772011-10-20 15:18:51 -07001201 BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
1202 batostr(bdaddr), addr_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001204 old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001205 if (old_key) {
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001206 key = old_key;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001207 } else {
1208 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001209 if (!key)
1210 return -ENOMEM;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001211 list_add(&key->list, &hdev->link_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001212 }
1213
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001214 key->dlen = sizeof(*id);
1215
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001216 bacpy(&key->bdaddr, bdaddr);
Brian Gixcf956772011-10-20 15:18:51 -07001217 key->addr_type = addr_type;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001218 memcpy(key->val, ltk, sizeof(key->val));
Brian Gixcf956772011-10-20 15:18:51 -07001219 key->key_type = KEY_TYPE_LTK;
Vinicius Costa Gomes1fa2de32011-07-08 18:31:45 -03001220 key->pin_len = key_size;
Brian Gixa68668b2011-08-11 15:49:36 -07001221 key->auth = auth;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001222
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001223 id = (void *) &key->data;
1224 id->ediv = ediv;
1225 memcpy(id->rand, rand, sizeof(id->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001227 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001228 mgmt_new_key(hdev->id, key, auth & 0x01);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001229
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230 return 0;
1231}
1232
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001233int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234{
1235 struct link_key *key;
1236
1237 key = hci_find_link_key(hdev, bdaddr);
1238 if (!key)
1239 return -ENOENT;
1240
1241 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1242
1243 list_del(&key->list);
1244 kfree(key);
1245
1246 return 0;
1247}
1248
Ville Tervo6bd32322011-02-16 16:32:41 +02001249/* HCI command timer function */
1250static void hci_cmd_timer(unsigned long arg)
1251{
1252 struct hci_dev *hdev = (void *) arg;
1253
1254 BT_ERR("%s command tx timeout", hdev->name);
1255 atomic_set(&hdev->cmd_cnt, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001256 clear_bit(HCI_RESET, &hdev->flags);
Ville Tervo6bd32322011-02-16 16:32:41 +02001257 tasklet_schedule(&hdev->cmd_task);
1258}
1259
Szymon Janc2763eda2011-03-22 13:12:22 +01001260struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1261 bdaddr_t *bdaddr)
1262{
1263 struct oob_data *data;
1264
1265 list_for_each_entry(data, &hdev->remote_oob_data, list)
1266 if (bacmp(bdaddr, &data->bdaddr) == 0)
1267 return data;
1268
1269 return NULL;
1270}
1271
1272int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1273{
1274 struct oob_data *data;
1275
1276 data = hci_find_remote_oob_data(hdev, bdaddr);
1277 if (!data)
1278 return -ENOENT;
1279
1280 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1281
1282 list_del(&data->list);
1283 kfree(data);
1284
1285 return 0;
1286}
1287
1288int hci_remote_oob_data_clear(struct hci_dev *hdev)
1289{
1290 struct oob_data *data, *n;
1291
1292 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1293 list_del(&data->list);
1294 kfree(data);
1295 }
1296
1297 return 0;
1298}
1299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300static void hci_adv_clear(unsigned long arg)
1301{
1302 struct hci_dev *hdev = (void *) arg;
1303
1304 hci_adv_entries_clear(hdev);
1305}
1306
1307int hci_adv_entries_clear(struct hci_dev *hdev)
1308{
1309 struct list_head *p, *n;
1310
Brian Gixa68668b2011-08-11 15:49:36 -07001311 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 write_lock_bh(&hdev->adv_entries_lock);
1313
1314 list_for_each_safe(p, n, &hdev->adv_entries) {
1315 struct adv_entry *entry;
1316
1317 entry = list_entry(p, struct adv_entry, list);
1318
1319 list_del(p);
1320 kfree(entry);
1321 }
1322
1323 write_unlock_bh(&hdev->adv_entries_lock);
1324
1325 return 0;
1326}
1327
1328struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1329{
1330 struct list_head *p;
1331 struct adv_entry *res = NULL;
1332
Brian Gixa68668b2011-08-11 15:49:36 -07001333 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001334 read_lock_bh(&hdev->adv_entries_lock);
1335
1336 list_for_each(p, &hdev->adv_entries) {
1337 struct adv_entry *entry;
1338
1339 entry = list_entry(p, struct adv_entry, list);
1340
1341 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1342 res = entry;
1343 goto out;
1344 }
1345 }
1346out:
1347 read_unlock_bh(&hdev->adv_entries_lock);
1348 return res;
1349}
1350
1351static inline int is_connectable_adv(u8 evt_type)
1352{
1353 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1354 return 1;
1355
1356 return 0;
1357}
1358
Szymon Janc2763eda2011-03-22 13:12:22 +01001359int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1360 u8 *randomizer)
1361{
1362 struct oob_data *data;
1363
1364 data = hci_find_remote_oob_data(hdev, bdaddr);
1365
1366 if (!data) {
1367 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1368 if (!data)
1369 return -ENOMEM;
1370
1371 bacpy(&data->bdaddr, bdaddr);
1372 list_add(&data->list, &hdev->remote_oob_data);
1373 }
1374
1375 memcpy(data->hash, hash, sizeof(data->hash));
1376 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1377
1378 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1379
1380 return 0;
1381}
1382
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001383int hci_add_adv_entry(struct hci_dev *hdev,
1384 struct hci_ev_le_advertising_info *ev)
Andre Guedes76c86862011-05-26 16:23:50 -03001385{
1386 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001387 u8 flags = 0;
1388 int i;
Andre Guedes76c86862011-05-26 16:23:50 -03001389
Brian Gixa68668b2011-08-11 15:49:36 -07001390 BT_DBG("");
Andre Guedes76c86862011-05-26 16:23:50 -03001391
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001392 if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001393 return -EINVAL;
1394
Brian Gixfdd38922011-09-28 16:23:48 -07001395 if (ev->data && ev->length) {
1396 for (i = 0; (i + 2) < ev->length; i++)
1397 if (ev->data[i+1] == 0x01) {
1398 flags = ev->data[i+2];
1399 BT_DBG("flags: %2.2x", flags);
1400 break;
1401 } else {
1402 i += ev->data[i];
1403 }
1404 }
1405
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes76c86862011-05-26 16:23:50 -03001407 /* Only new entries should be added to adv_entries. So, if
1408 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001409 if (entry) {
1410 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001411 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001412 }
Andre Guedes76c86862011-05-26 16:23:50 -03001413
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001414 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
Andre Guedes76c86862011-05-26 16:23:50 -03001415 if (!entry)
1416 return -ENOMEM;
1417
1418 bacpy(&entry->bdaddr, &ev->bdaddr);
1419 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001420 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001421
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001422 write_lock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001423 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001424 write_unlock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001425
1426 return 0;
1427}
1428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001429static struct crypto_blkcipher *alloc_cypher(void)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001430{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 if (enable_smp)
1432 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001433
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001434 return ERR_PTR(-ENOTSUPP);
Andre Guedes28b75a82012-02-03 17:48:00 -03001435}
1436
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437/* Register HCI device */
1438int hci_register_dev(struct hci_dev *hdev)
1439{
1440 struct list_head *head = &hci_dev_list, *p;
Peter Krystad462bf762011-09-19 14:20:20 -07001441 int i, id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001443 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1444 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446 if (!hdev->open || !hdev->close || !hdev->destruct)
1447 return -EINVAL;
1448
Mat Martineau08add512011-11-02 16:18:36 -07001449 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 write_lock_bh(&hci_dev_list_lock);
1452
1453 /* Find first available device id */
1454 list_for_each(p, &hci_dev_list) {
1455 if (list_entry(p, struct hci_dev, list)->id != id)
1456 break;
1457 head = p; id++;
1458 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 sprintf(hdev->name, "hci%d", id);
1461 hdev->id = id;
1462 list_add(&hdev->list, head);
1463
1464 atomic_set(&hdev->refcnt, 1);
1465 spin_lock_init(&hdev->lock);
1466
1467 hdev->flags = 0;
1468 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001469 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001471 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
Marcel Holtmann04837f62006-07-03 10:02:33 +02001473 hdev->idle_timeout = 0;
1474 hdev->sniff_max_interval = 800;
1475 hdev->sniff_min_interval = 80;
1476
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001477 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1479 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1480
1481 skb_queue_head_init(&hdev->rx_q);
1482 skb_queue_head_init(&hdev->cmd_q);
1483 skb_queue_head_init(&hdev->raw_q);
1484
Ville Tervo6bd32322011-02-16 16:32:41 +02001485 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
Brian Gix568dde92012-01-11 16:18:04 -08001486 setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
1487 (unsigned long) hdev);
1488 setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
1489 (unsigned long) hdev);
Ville Tervo6bd32322011-02-16 16:32:41 +02001490
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301491 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001492 hdev->reassembly[i] = NULL;
1493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001495 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496
1497 inquiry_cache_init(hdev);
1498
1499 hci_conn_hash_init(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001500 hci_chan_list_init(hdev);
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001501
David Millerea4bd8b2010-07-30 21:54:49 -07001502 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001503
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001504 INIT_LIST_HEAD(&hdev->uuids);
1505
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001506 INIT_LIST_HEAD(&hdev->link_keys);
1507
Szymon Janc2763eda2011-03-22 13:12:22 +01001508 INIT_LIST_HEAD(&hdev->remote_oob_data);
1509
Andre Guedes76c86862011-05-26 16:23:50 -03001510 INIT_LIST_HEAD(&hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511 rwlock_init(&hdev->adv_entries_lock);
1512 setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
Andre Guedes76c86862011-05-26 16:23:50 -03001513
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001514 INIT_WORK(&hdev->power_on, hci_power_on);
1515 INIT_WORK(&hdev->power_off, hci_power_off);
1516 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001517
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1519
1520 atomic_set(&hdev->promisc, 0);
1521
1522 write_unlock_bh(&hci_dev_list_lock);
Andre Guedes28b75a82012-02-03 17:48:00 -03001523
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001524 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1525 if (!hdev->workqueue)
1526 goto nomem;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001527
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001528 hdev->tfm = alloc_cypher();
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -03001529 if (IS_ERR(hdev->tfm))
1530 BT_INFO("Failed to load transform for ecb(aes): %ld",
1531 PTR_ERR(hdev->tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
1533 hci_register_sysfs(hdev);
1534
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001535 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1536 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1537 if (hdev->rfkill) {
1538 if (rfkill_register(hdev->rfkill) < 0) {
1539 rfkill_destroy(hdev->rfkill);
1540 hdev->rfkill = NULL;
1541 }
1542 }
1543
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001544 set_bit(HCI_AUTO_OFF, &hdev->flags);
1545 set_bit(HCI_SETUP, &hdev->flags);
1546 queue_work(hdev->workqueue, &hdev->power_on);
1547
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 hci_notify(hdev, HCI_DEV_REG);
1549
1550 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001551
1552nomem:
1553 write_lock_bh(&hci_dev_list_lock);
1554 list_del(&hdev->list);
1555 write_unlock_bh(&hci_dev_list_lock);
1556
1557 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558}
1559EXPORT_SYMBOL(hci_register_dev);
1560
/* Unregister HCI device: remove it from the global list, shut it
 * down, notify mgmt, tear down crypto/rfkill/sysfs, stop every timer,
 * flush the workqueue, clear all per-device caches, and drop the
 * registration reference. Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink first so no new lookups can find the device */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev, hdev->bus == HCI_SMD);

	/* kfree_skb() tolerates NULL slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up BR/EDR devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
			!test_bit(HCI_SETUP, &hdev->flags) &&
			hdev->dev_type == HCI_BREDR) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev->id);
		hci_dev_unlock_bh(hdev);
	}

	/* tfm may hold an ERR_PTR when SMP was disabled at registration */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Disable all timers */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);
	del_timer(&hdev->cmd_timer);
	del_timer(&hdev->disco_timer);
	del_timer(&hdev->disco_le_timer);

	destroy_workqueue(hdev->workqueue);

	/* Drop all cached per-device data under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1619
/* Suspend HCI device: notify registered protocols/listeners only;
 * no device state is changed here. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1627
/* Resume HCI device: notify registered protocols/listeners only;
 * no device state is changed here. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1635
Marcel Holtmann76bca882009-11-18 00:40:39 +01001636/* Receive frame from HCI drivers */
1637int hci_recv_frame(struct sk_buff *skb)
1638{
1639 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1640 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1641 && !test_bit(HCI_INIT, &hdev->flags))) {
1642 kfree_skb(skb);
1643 return -ENXIO;
1644 }
1645
1646 /* Incomming skb */
1647 bt_cb(skb)->incoming = 1;
1648
1649 /* Time stamp */
1650 __net_timestamp(skb);
1651
1652 /* Queue frame for rx task */
1653 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001654 tasklet_schedule(&hdev->rx_task);
1655
Marcel Holtmann76bca882009-11-18 00:40:39 +01001656 return 0;
1657}
1658EXPORT_SYMBOL(hci_recv_frame);
1659
/* Common packet-reassembly helper.
 *
 * Accumulates up to @count bytes from @data into the partially built
 * packet held in hdev->reassembly[@index].  A new skb is allocated when
 * a fresh packet starts; once the header has been gathered, the expected
 * payload length is read from it.  When the packet is complete it is
 * handed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (callers loop until 0),
 * or a negative errno on invalid type/index or allocation failure.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the largest
		 * possible frame of this packet type. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* first gather the packet header */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Exactly header-sized means the header just completed:
		 * read the payload length and sanity-check it against the
		 * room left in the skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1768
Marcel Holtmannef222012007-07-11 06:42:04 +02001769int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1770{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301771 int rem = 0;
1772
Marcel Holtmannef222012007-07-11 06:42:04 +02001773 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1774 return -EILSEQ;
1775
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001776 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001777 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301778 if (rem < 0)
1779 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001780
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301781 data += (count - rem);
1782 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001783 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001784
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301785 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001786}
1787EXPORT_SYMBOL(hci_recv_fragment);
1788
/* Reassembly slot shared by all stream-transport data */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream (e.g. UART transports).
 * The packet type indicator is the first byte of each new frame.
 * Returns the number of bytes still expected for the packet in
 * progress, or a negative error from hci_reassembly(). */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1823
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824/* ---- Interface to upper protocols ---- */
1825
1826/* Register/Unregister protocols.
1827 * hci_task_lock is used to ensure that no tasks are running. */
1828int hci_register_proto(struct hci_proto *hp)
1829{
1830 int err = 0;
1831
1832 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1833
1834 if (hp->id >= HCI_MAX_PROTO)
1835 return -EINVAL;
1836
1837 write_lock_bh(&hci_task_lock);
1838
1839 if (!hci_proto[hp->id])
1840 hci_proto[hp->id] = hp;
1841 else
1842 err = -EEXIST;
1843
1844 write_unlock_bh(&hci_task_lock);
1845
1846 return err;
1847}
1848EXPORT_SYMBOL(hci_register_proto);
1849
1850int hci_unregister_proto(struct hci_proto *hp)
1851{
1852 int err = 0;
1853
1854 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1855
1856 if (hp->id >= HCI_MAX_PROTO)
1857 return -EINVAL;
1858
1859 write_lock_bh(&hci_task_lock);
1860
1861 if (hci_proto[hp->id])
1862 hci_proto[hp->id] = NULL;
1863 else
1864 err = -ENOENT;
1865
1866 write_unlock_bh(&hci_task_lock);
1867
1868 return err;
1869}
1870EXPORT_SYMBOL(hci_unregister_proto);
1871
/* Add a HCI callback structure to the global callback list. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1883
/* Remove a HCI callback structure from the global callback list. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1895
/* Register an AMP manager callback set; its hooks are invoked from the
 * hci_amp_* dispatch helpers below. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1907
/* Remove an AMP manager callback set registered with hci_register_amp(). */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1919
/* Fan a Command Complete event out to every registered AMP manager
 * that installed an amp_cmd_complete_event hook. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1934
/* Fan a Command Status event out to every registered AMP manager
 * that installed an amp_cmd_status_event hook. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1948
/* Fan a generic HCI event out to every registered AMP manager
 * that installed an amp_event hook. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1963
/* Hand a fully framed packet to the transport driver's send hook.
 * Consumes the skb (or passes ownership to the driver).  A copy goes to
 * monitoring sockets when any are in promiscuous mode. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1988
/* Send HCI command.
 * Builds a command packet (header + @plen bytes of @param), queues it on
 * cmd_q and kicks the cmd tasklet, which serializes command transmission.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init so the
	 * init sequence can resume after its completion */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025
/* Get data from the previously sent command.
 * Returns a pointer to the parameter bytes of the last transmitted
 * command if its opcode matches @opcode, otherwise NULL.  The returned
 * pointer aliases hdev->sent_cmd and is only valid until the next
 * command is sent. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2043
/* Send ACL data */
/* Prepend an ACL header (packed handle/flags + data length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2056
/* Queue an ACL data packet (possibly fragmented via skb frag_list) on a
 * connection's data queue and kick the tx tasklet.  For BR/EDR the ACL
 * handle comes from the connection; otherwise the logical-link handle of
 * @chan is used (AMP case — NOTE(review): chan must be non-NULL then). */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Continuation fragments carry the ACL_CONT packet
		 * boundary flag instead of the start-of-SDU flags */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2108
/* Send SCO data */
/* Prepend a SCO header and queue the packet for the tx tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2131
2132/* ---- HCI TX task (outgoing data) ---- */
Srinivas Krovvidi672dff32012-06-29 19:37:45 +05302133/* HCI ACL Connection scheduler */
2134static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
2135 int *quote)
2136{
2137 struct hci_conn_hash *h = &hdev->conn_hash;
2138 struct hci_conn *conn = NULL;
2139 int num = 0, min = ~0, conn_num = 0;
2140 struct list_head *p;
2141
2142 /* We don't have to lock device here. Connections are always
2143 * added and removed with TX task disabled. */
2144 list_for_each(p, &h->list) {
2145 struct hci_conn *c;
2146 c = list_entry(p, struct hci_conn, list);
2147 if (c->type == ACL_LINK)
2148 conn_num++;
2149
2150 if (skb_queue_empty(&c->data_q))
2151 continue;
2152
2153 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2154 continue;
2155
2156 num++;
2157
2158 if (c->sent < min) {
2159 min = c->sent;
2160 conn = c;
2161 }
2162 }
2163
2164 if (conn) {
2165 int cnt, q;
2166 cnt = hdev->acl_cnt;
2167 q = cnt / num;
2168 *quote = q ? q : 1;
2169 } else
2170 *quote = 0;
2171
2172 if ((*quote == hdev->acl_cnt) &&
2173 (conn->sent == (hdev->acl_pkts - 1)) &&
2174 (conn_num > 1)) {
2175 *quote = 0;
2176 conn = NULL;
2177 }
2178
2179 BT_DBG("conn %p quote %d", conn, *quote);
2180 return conn;
2181}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182
/* HCI Connection scheduler */
/* Select the connection of @type with the fewest packets in flight and
 * compute its fair-share quota from the buffer pool matching the link
 * type.  Returns NULL (and *quote = 0) when no connection is ready. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that throttles this link type;
		 * LE falls back to the ACL pool when the controller has
		 * no dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2238
/* Tear down all connections of @type that still have unacknowledged
 * packets — called when the controller has stopped returning buffer
 * credits (tx timeout). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each(p, &h->list) {
		c = list_entry(p, struct hci_conn, list);
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}
}
2257
2258static inline void hci_sched_acl(struct hci_dev *hdev)
2259{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 struct hci_conn *conn;
2261 struct sk_buff *skb;
2262 int quote;
2263
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002264 BT_DBG("%s", hdev->name);
2265
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 if (!test_bit(HCI_RAW, &hdev->flags)) {
2267 /* ACL tx timeout must be longer than maximum
2268 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002269 if (hdev->acl_cnt <= 0 &&
2270 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002271 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002272 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002273
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002274 while (hdev->acl_cnt > 0 &&
Srinivas Krovvidi672dff32012-06-29 19:37:45 +05302275 ((conn = hci_low_sent_acl(hdev, &quote)) != NULL)) {
2276
2277 while (quote > 0 &&
2278 (skb = skb_dequeue(&conn->data_q))) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002279 int count = 1;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002280
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002283 if (hdev->flow_ctl_mode ==
2284 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2285 /* Calculate count of blocks used by
2286 * this packet
2287 */
2288 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2289 hdev->data_block_len) + 1;
2290
2291 if (count > hdev->acl_cnt)
2292 return;
2293
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002294 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002295
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 hci_send_frame(skb);
2297 hdev->acl_last_tx = jiffies;
2298
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002299 hdev->acl_cnt -= count;
2300 quote -= count;
2301
2302 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002304 }
2305}
2306
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307/* Schedule SCO */
2308static inline void hci_sched_sco(struct hci_dev *hdev)
2309{
2310 struct hci_conn *conn;
2311 struct sk_buff *skb;
2312 int quote;
2313
2314 BT_DBG("%s", hdev->name);
2315
2316 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2317 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2318 BT_DBG("skb %p len %d", skb, skb->len);
2319 hci_send_frame(skb);
2320
2321 conn->sent++;
2322 if (conn->sent == ~0)
2323 conn->sent = 0;
2324 }
2325 }
2326}
2327
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002328static inline void hci_sched_esco(struct hci_dev *hdev)
2329{
2330 struct hci_conn *conn;
2331 struct sk_buff *skb;
2332 int quote;
2333
2334 BT_DBG("%s", hdev->name);
2335
2336 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2337 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2338 BT_DBG("skb %p len %d", skb, skb->len);
2339 hci_send_frame(skb);
2340
2341 conn->sent++;
2342 if (conn->sent == ~0)
2343 conn->sent = 0;
2344 }
2345 }
2346}
2347
/* Drain queued LE data; uses the dedicated LE buffer pool when the
 * controller advertises one, otherwise shares the ACL pool. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2381
/* TX tasklet: run each link-type scheduler, then flush raw packets.
 * hci_task_lock (read) keeps protocol registration stable while we run. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2408
/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
/* ACL data packet */
/* Parse an inbound ACL packet's handle/flags and pass its payload to
 * L2CAP; consumes the skb when no handler takes it. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the connection handle and the packet
	 * boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Ownership of the skb passes to L2CAP here */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2450
/* SCO data packet */
/* Parse an inbound SCO packet's handle and pass its payload to the SCO
 * protocol; consumes the skb when no handler takes it. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Ownership of the skb passes to SCO here */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2486
/* RX tasklet: drain rx_q, mirroring frames to promiscuous sockets and
 * dispatching events/ACL/SCO to their packet handlers.  Data packets
 * are discarded while the device is in raw mode or still initializing. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2541
/* CMD tasklet: transmit at most one queued command when the controller
 * has a free command credit, keeping a clone in sent_cmd for later
 * opcode matching and arming the command timeout timer. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previous command (if any) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and retry on the next run */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002569
/* Expose the enable_smp flag as a writable (0644) module parameter so
 * SMP (Security Manager Protocol, LE only) support can be toggled at
 * load time or via sysfs. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");