blob: 9962c885c7fb62fd6ac89ef8323ae4cd2d338026 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010-2012 The Linux Foundation. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
24
25/* Bluetooth HCI core. */
26
S.Çağlar Onur82453022008-02-17 23:25:57 -080027#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/module.h>
29#include <linux/kmod.h>
30
31#include <linux/types.h>
32#include <linux/errno.h>
33#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034#include <linux/sched.h>
35#include <linux/slab.h>
36#include <linux/poll.h>
37#include <linux/fcntl.h>
38#include <linux/init.h>
39#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010040#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/interrupt.h>
42#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static void hci_cmd_task(unsigned long arg);
58static void hci_rx_task(unsigned long arg);
59static void hci_tx_task(unsigned long arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
61static DEFINE_RWLOCK(hci_task_lock);
62
Steve Mucklef132c6c2012-06-06 18:30:57 -070063static bool enable_smp = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Linus Torvalds1da177e2005-04-16 15:20:36 -070065/* HCI device list */
66LIST_HEAD(hci_dev_list);
67DEFINE_RWLOCK(hci_dev_list_lock);
68
69/* HCI callback list */
70LIST_HEAD(hci_cb_list);
71DEFINE_RWLOCK(hci_cb_list_lock);
72
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073/* AMP Manager event callbacks */
74LIST_HEAD(amp_mgr_cb_list);
75DEFINE_RWLOCK(amp_mgr_cb_list_lock);
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077/* HCI protocols */
78#define HCI_MAX_PROTO 2
79struct hci_proto *hci_proto[HCI_MAX_PROTO];
80
81/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080082static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070083
84/* ---- HCI notifications ---- */
85
86int hci_register_notifier(struct notifier_block *nb)
87{
Alan Sterne041c682006-03-27 01:16:30 -080088 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089}
90
91int hci_unregister_notifier(struct notifier_block *nb)
92{
Alan Sterne041c682006-03-27 01:16:30 -080093 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094}
95
Marcel Holtmann65164552005-10-28 19:20:48 +020096static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070097{
Alan Sterne041c682006-03-27 01:16:30 -080098 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070099}
100
101/* ---- HCI requests ---- */
102
Johan Hedberg23bb5762010-12-21 23:01:27 +0200103void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700104{
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
106
Johan Hedberga5040ef2011-01-10 13:28:59 +0200107 /* If this is the init phase check if the completed command matches
108 * the last init command, and if not just return.
109 */
110 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200111 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
113 if (hdev->req_status == HCI_REQ_PEND) {
114 hdev->req_result = result;
115 hdev->req_status = HCI_REQ_DONE;
116 wake_up_interruptible(&hdev->req_wait_q);
117 }
118}
119
120static void hci_req_cancel(struct hci_dev *hdev, int err)
121{
122 BT_DBG("%s err 0x%2.2x", hdev->name, err);
123
124 if (hdev->req_status == HCI_REQ_PEND) {
125 hdev->req_result = err;
126 hdev->req_status = HCI_REQ_CANCELED;
127 wake_up_interruptible(&hdev->req_wait_q);
128 }
129}
130
131/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900132static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100133 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134{
135 DECLARE_WAITQUEUE(wait, current);
136 int err = 0;
137
138 BT_DBG("%s start", hdev->name);
139
140 hdev->req_status = HCI_REQ_PEND;
141
142 add_wait_queue(&hdev->req_wait_q, &wait);
143 set_current_state(TASK_INTERRUPTIBLE);
144
145 req(hdev, opt);
146 schedule_timeout(timeout);
147
148 remove_wait_queue(&hdev->req_wait_q, &wait);
149
150 if (signal_pending(current))
151 return -EINTR;
152
153 switch (hdev->req_status) {
154 case HCI_REQ_DONE:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700155 err = -bt_err(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 break;
157
158 case HCI_REQ_CANCELED:
159 err = -hdev->req_result;
160 break;
161
162 default:
163 err = -ETIMEDOUT;
164 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700165 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
Johan Hedberga5040ef2011-01-10 13:28:59 +0200167 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 BT_DBG("%s end: err %d", hdev->name, err);
170
171 return err;
172}
173
174static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100175 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176{
177 int ret;
178
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200179 if (!test_bit(HCI_UP, &hdev->flags))
180 return -ENETDOWN;
181
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 /* Serialize all requests */
183 hci_req_lock(hdev);
184 ret = __hci_request(hdev, req, opt, timeout);
185 hci_req_unlock(hdev);
186
187 return ret;
188}
189
190static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
191{
192 BT_DBG("%s %ld", hdev->name, opt);
193
194 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 set_bit(HCI_RESET, &hdev->flags);
Brian Gix6e4531c2011-10-28 16:12:08 -0700196 memset(&hdev->features, 0, sizeof(hdev->features));
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200197 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198}
199
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200200static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
201{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200202 struct hci_cp_delete_stored_link_key cp;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200203 struct sk_buff *skb;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800204 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200205 __u8 flt_type;
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200206
207 BT_DBG("%s %ld", hdev->name, opt);
208
209 /* Driver initialization */
210
211 /* Special commands */
212 while ((skb = skb_dequeue(&hdev->driver_init))) {
213 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
214 skb->dev = (void *) hdev;
215
216 skb_queue_tail(&hdev->cmd_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +0100217 tasklet_schedule(&hdev->cmd_task);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200218 }
219 skb_queue_purge(&hdev->driver_init);
220
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221 /* Mandatory initialization */
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200222
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300224 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
225 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200227 }
228
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200229 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200231
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700232
233 /* Set default HCI Flow Control Mode */
234 if (hdev->dev_type == HCI_BREDR)
235 hdev->flow_ctl_mode = HCI_PACKET_BASED_FLOW_CTL_MODE;
236 else
237 hdev->flow_ctl_mode = HCI_BLOCK_BASED_FLOW_CTL_MODE;
238
239 /* Read HCI Flow Control Mode */
240 hci_send_cmd(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
241
Linus Torvalds1da177e2005-04-16 15:20:36 -0700242 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200243 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700245 /* Read Data Block Size (ACL mtu, max pkt, etc.) */
246 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
247
Linus Torvalds1da177e2005-04-16 15:20:36 -0700248#if 0
249 /* Host buffer size */
250 {
251 struct hci_cp_host_buffer_size cp;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700252 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253 cp.sco_mtu = HCI_MAX_SCO_SIZE;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700254 cp.acl_max_pkt = cpu_to_le16(0xffff);
255 cp.sco_max_pkt = cpu_to_le16(0xffff);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200256 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700257 }
258#endif
259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700260 if (hdev->dev_type == HCI_BREDR) {
261 /* BR-EDR initialization */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200262
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700263 /* Read Local Supported Features */
264 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700266 /* Read BD Address */
267 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700269 /* Read Class of Device */
270 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700272 /* Read Local Name */
273 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700275 /* Read Voice Setting */
276 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700278 /* Optional initialization */
279 /* Clear Event Filters */
280 flt_type = HCI_FLT_CLEAR_ALL;
281 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200282
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700283 /* Connection accept timeout ~20 secs */
284 param = cpu_to_le16(0x7d00);
285 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
286
287 bacpy(&cp.bdaddr, BDADDR_ANY);
288 cp.delete_all = 1;
289 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
290 sizeof(cp), &cp);
291 } else {
292 /* AMP initialization */
293 /* Connection accept timeout ~5 secs */
294 param = cpu_to_le16(0x1f40);
295 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
296
297 /* Read AMP Info */
298 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
299 }
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200300}
301
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300302static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
303{
304 BT_DBG("%s", hdev->name);
305
306 /* Read LE buffer size */
307 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Sunny Kapdi93bef892012-07-30 14:52:56 -0700308
309 /* Read LE clear white list */
310 hci_send_cmd(hdev, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
311
312 /* Read LE white list size */
313 hci_send_cmd(hdev, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300314}
315
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
317{
318 __u8 scan = opt;
319
320 BT_DBG("%s %x", hdev->name, scan);
321
322 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200323 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700324}
325
326static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
327{
328 __u8 auth = opt;
329
330 BT_DBG("%s %x", hdev->name, auth);
331
332 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200333 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334}
335
336static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
337{
338 __u8 encrypt = opt;
339
340 BT_DBG("%s %x", hdev->name, encrypt);
341
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200342 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200343 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344}
345
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200346static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
347{
348 __le16 policy = cpu_to_le16(opt);
349
Marcel Holtmanna418b892008-11-30 12:17:28 +0100350 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200351
352 /* Default link policy */
353 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
354}
355
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900356/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 * Device is held on return. */
358struct hci_dev *hci_dev_get(int index)
359{
360 struct hci_dev *hdev = NULL;
361 struct list_head *p;
362
363 BT_DBG("%d", index);
364
365 if (index < 0)
366 return NULL;
367
368 read_lock(&hci_dev_list_lock);
369 list_for_each(p, &hci_dev_list) {
370 struct hci_dev *d = list_entry(p, struct hci_dev, list);
371 if (d->id == index) {
372 hdev = hci_dev_hold(d);
373 break;
374 }
375 }
376 read_unlock(&hci_dev_list_lock);
377 return hdev;
378}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700379EXPORT_SYMBOL(hci_dev_get);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380
381/* ---- Inquiry support ---- */
382static void inquiry_cache_flush(struct hci_dev *hdev)
383{
384 struct inquiry_cache *cache = &hdev->inq_cache;
385 struct inquiry_entry *next = cache->list, *e;
386
387 BT_DBG("cache %p", cache);
388
389 cache->list = NULL;
390 while ((e = next)) {
391 next = e->next;
392 kfree(e);
393 }
394}
395
396struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
397{
398 struct inquiry_cache *cache = &hdev->inq_cache;
399 struct inquiry_entry *e;
400
401 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
402
403 for (e = cache->list; e; e = e->next)
404 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200405 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 return e;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200407}
408
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
410{
411 struct inquiry_cache *cache = &hdev->inq_cache;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200412 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413
414 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
415
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200416 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
417 if (!ie) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 /* Entry not in the cache. Add new one. */
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200419 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
420 if (!ie)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 return;
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200422
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200423 ie->next = cache->list;
424 cache->list = ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 }
426
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200427 memcpy(&ie->data, data, sizeof(*data));
428 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 cache->timestamp = jiffies;
430}
431
432static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
433{
434 struct inquiry_cache *cache = &hdev->inq_cache;
435 struct inquiry_info *info = (struct inquiry_info *) buf;
436 struct inquiry_entry *e;
437 int copied = 0;
438
439 for (e = cache->list; e && copied < num; e = e->next, copied++) {
440 struct inquiry_data *data = &e->data;
441 bacpy(&info->bdaddr, &data->bdaddr);
442 info->pscan_rep_mode = data->pscan_rep_mode;
443 info->pscan_period_mode = data->pscan_period_mode;
444 info->pscan_mode = data->pscan_mode;
445 memcpy(info->dev_class, data->dev_class, 3);
446 info->clock_offset = data->clock_offset;
447 info++;
448 }
449
450 BT_DBG("cache %p, copied %d", cache, copied);
451 return copied;
452}
453
454static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
455{
456 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
457 struct hci_cp_inquiry cp;
458
459 BT_DBG("%s", hdev->name);
460
461 if (test_bit(HCI_INQUIRY, &hdev->flags))
462 return;
463
464 /* Start Inquiry */
465 memcpy(&cp.lap, &ir->lap, 3);
466 cp.length = ir->length;
467 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200468 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700469}
470
471int hci_inquiry(void __user *arg)
472{
473 __u8 __user *ptr = arg;
474 struct hci_inquiry_req ir;
475 struct hci_dev *hdev;
476 int err = 0, do_inquiry = 0, max_rsp;
477 long timeo;
478 __u8 *buf;
479
480 if (copy_from_user(&ir, ptr, sizeof(ir)))
481 return -EFAULT;
482
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200483 hdev = hci_dev_get(ir.dev_id);
484 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485 return -ENODEV;
486
487 hci_dev_lock_bh(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900488 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200489 inquiry_cache_empty(hdev) ||
490 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 inquiry_cache_flush(hdev);
492 do_inquiry = 1;
493 }
494 hci_dev_unlock_bh(hdev);
495
Marcel Holtmann04837f62006-07-03 10:02:33 +0200496 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200497
498 if (do_inquiry) {
499 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
500 if (err < 0)
501 goto done;
502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700503
504 /* for unlimited number of responses we will use buffer with 255 entries */
505 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
506
507 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
508 * copy it to the user space.
509 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100510 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200511 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512 err = -ENOMEM;
513 goto done;
514 }
515
516 hci_dev_lock_bh(hdev);
517 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
518 hci_dev_unlock_bh(hdev);
519
520 BT_DBG("num_rsp %d", ir.num_rsp);
521
522 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
523 ptr += sizeof(ir);
524 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
525 ir.num_rsp))
526 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900527 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528 err = -EFAULT;
529
530 kfree(buf);
531
532done:
533 hci_dev_put(hdev);
534 return err;
535}
536
537/* ---- HCI ioctl helpers ---- */
538
539int hci_dev_open(__u16 dev)
540{
541 struct hci_dev *hdev;
542 int ret = 0;
543
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200544 hdev = hci_dev_get(dev);
545 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 return -ENODEV;
547
548 BT_DBG("%s %p", hdev->name, hdev);
549
550 hci_req_lock(hdev);
551
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200552 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
553 ret = -ERFKILL;
554 goto done;
555 }
556
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 if (test_bit(HCI_UP, &hdev->flags)) {
558 ret = -EALREADY;
559 goto done;
560 }
561
562 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
563 set_bit(HCI_RAW, &hdev->flags);
564
565 if (hdev->open(hdev)) {
566 ret = -EIO;
567 goto done;
568 }
569
Bhasker Netia6e6a4f2012-01-27 15:25:43 +0530570 if (!skb_queue_empty(&hdev->cmd_q)) {
571 BT_ERR("command queue is not empty, purging");
572 skb_queue_purge(&hdev->cmd_q);
573 }
574 if (!skb_queue_empty(&hdev->rx_q)) {
575 BT_ERR("rx queue is not empty, purging");
576 skb_queue_purge(&hdev->rx_q);
577 }
578 if (!skb_queue_empty(&hdev->raw_q)) {
579 BT_ERR("raw queue is not empty, purging");
580 skb_queue_purge(&hdev->raw_q);
581 }
582
Linus Torvalds1da177e2005-04-16 15:20:36 -0700583 if (!test_bit(HCI_RAW, &hdev->flags)) {
584 atomic_set(&hdev->cmd_cnt, 1);
585 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200586 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587
Marcel Holtmann04837f62006-07-03 10:02:33 +0200588 ret = __hci_request(hdev, hci_init_req, 0,
589 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700591 if (lmp_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300592 ret = __hci_request(hdev, hci_le_init_req, 0,
593 msecs_to_jiffies(HCI_INIT_TIMEOUT));
594
Linus Torvalds1da177e2005-04-16 15:20:36 -0700595 clear_bit(HCI_INIT, &hdev->flags);
596 }
597
598 if (!ret) {
599 hci_dev_hold(hdev);
600 set_bit(HCI_UP, &hdev->flags);
601 hci_notify(hdev, HCI_DEV_UP);
Peter Krystad1fc44072011-08-30 15:38:12 -0700602 if (!test_bit(HCI_SETUP, &hdev->flags) &&
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800603 hdev->dev_type == HCI_BREDR) {
604 hci_dev_lock_bh(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200605 mgmt_powered(hdev->id, 1);
Subramanian Srinivasana727a492011-11-30 13:06:07 -0800606 hci_dev_unlock_bh(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200607 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900608 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 /* Init failed, cleanup */
610 tasklet_kill(&hdev->rx_task);
611 tasklet_kill(&hdev->tx_task);
612 tasklet_kill(&hdev->cmd_task);
613
614 skb_queue_purge(&hdev->cmd_q);
615 skb_queue_purge(&hdev->rx_q);
616
617 if (hdev->flush)
618 hdev->flush(hdev);
619
620 if (hdev->sent_cmd) {
621 kfree_skb(hdev->sent_cmd);
622 hdev->sent_cmd = NULL;
623 }
624
625 hdev->close(hdev);
626 hdev->flags = 0;
627 }
628
629done:
630 hci_req_unlock(hdev);
631 hci_dev_put(hdev);
632 return ret;
633}
634
Mat Martineau3b9239a2012-02-16 11:54:30 -0800635static int hci_dev_do_close(struct hci_dev *hdev, u8 is_process)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636{
Mat Martineau4106b992011-11-18 15:26:21 -0800637 unsigned long keepflags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639 BT_DBG("%s %p", hdev->name, hdev);
Andre Guedes28b75a82012-02-03 17:48:00 -0300640
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 hci_req_cancel(hdev, ENODEV);
642 hci_req_lock(hdev);
643
644 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300645 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 hci_req_unlock(hdev);
647 return 0;
648 }
649
650 /* Kill RX and TX tasks */
651 tasklet_kill(&hdev->rx_task);
652 tasklet_kill(&hdev->tx_task);
653
654 hci_dev_lock_bh(hdev);
655 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800656 hci_conn_hash_flush(hdev, is_process);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700657 hci_dev_unlock_bh(hdev);
658
659 hci_notify(hdev, HCI_DEV_DOWN);
660
Bhasker Netiffdff572011-12-21 17:24:01 -0800661 if (hdev->dev_type == HCI_BREDR) {
662 hci_dev_lock_bh(hdev);
663 mgmt_powered(hdev->id, 0);
664 hci_dev_unlock_bh(hdev);
665 }
666
Linus Torvalds1da177e2005-04-16 15:20:36 -0700667 if (hdev->flush)
668 hdev->flush(hdev);
669
670 /* Reset device */
671 skb_queue_purge(&hdev->cmd_q);
672 atomic_set(&hdev->cmd_cnt, 1);
673 if (!test_bit(HCI_RAW, &hdev->flags)) {
674 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200675 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200676 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 clear_bit(HCI_INIT, &hdev->flags);
678 }
679
680 /* Kill cmd task */
681 tasklet_kill(&hdev->cmd_task);
682
683 /* Drop queues */
684 skb_queue_purge(&hdev->rx_q);
685 skb_queue_purge(&hdev->cmd_q);
686 skb_queue_purge(&hdev->raw_q);
687
688 /* Drop last sent command */
689 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300690 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 kfree_skb(hdev->sent_cmd);
692 hdev->sent_cmd = NULL;
693 }
694
695 /* After this point our queues are empty
696 * and no tasks are scheduled. */
697 hdev->close(hdev);
698
Mat Martineau4106b992011-11-18 15:26:21 -0800699 /* Clear only non-persistent flags */
700 if (test_bit(HCI_MGMT, &hdev->flags))
701 set_bit(HCI_MGMT, &keepflags);
702 if (test_bit(HCI_LINK_KEYS, &hdev->flags))
703 set_bit(HCI_LINK_KEYS, &keepflags);
704 if (test_bit(HCI_DEBUG_KEYS, &hdev->flags))
705 set_bit(HCI_DEBUG_KEYS, &keepflags);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200706
Mat Martineau4106b992011-11-18 15:26:21 -0800707 hdev->flags = keepflags;
Johan Hedberge59fda82012-02-22 18:11:53 +0200708
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709 hci_req_unlock(hdev);
710
711 hci_dev_put(hdev);
712 return 0;
713}
714
715int hci_dev_close(__u16 dev)
716{
717 struct hci_dev *hdev;
718 int err;
719
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200720 hdev = hci_dev_get(dev);
721 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722 return -ENODEV;
Mat Martineau3b9239a2012-02-16 11:54:30 -0800723 err = hci_dev_do_close(hdev, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 hci_dev_put(hdev);
725 return err;
726}
727
728int hci_dev_reset(__u16 dev)
729{
730 struct hci_dev *hdev;
731 int ret = 0;
732
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200733 hdev = hci_dev_get(dev);
734 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735 return -ENODEV;
736
737 hci_req_lock(hdev);
738 tasklet_disable(&hdev->tx_task);
739
740 if (!test_bit(HCI_UP, &hdev->flags))
741 goto done;
742
743 /* Drop queues */
744 skb_queue_purge(&hdev->rx_q);
745 skb_queue_purge(&hdev->cmd_q);
746
747 hci_dev_lock_bh(hdev);
748 inquiry_cache_flush(hdev);
Mat Martineau3b9239a2012-02-16 11:54:30 -0800749 hci_conn_hash_flush(hdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 hci_dev_unlock_bh(hdev);
751
752 if (hdev->flush)
753 hdev->flush(hdev);
754
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900755 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300756 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
758 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200759 ret = __hci_request(hdev, hci_reset_req, 0,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761
762done:
763 tasklet_enable(&hdev->tx_task);
764 hci_req_unlock(hdev);
765 hci_dev_put(hdev);
766 return ret;
767}
768
769int hci_dev_reset_stat(__u16 dev)
770{
771 struct hci_dev *hdev;
772 int ret = 0;
773
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200774 hdev = hci_dev_get(dev);
775 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776 return -ENODEV;
777
778 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
779
780 hci_dev_put(hdev);
781
782 return ret;
783}
784
/* Handle the per-device HCI control ioctls (HCISETAUTH, HCISETENCRYPT,
 * HCISETSCAN, ...).
 *
 * @cmd: ioctl number selecting the operation
 * @arg: user pointer to a struct hci_dev_req (device id + option value)
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	/* hci_dev_get() takes a reference; dropped before returning */
	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Pure software setting: no request sent to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the upper 16 bits and the
		 * packet count in the lower 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
859
/* HCIGETDEVLIST ioctl helper: report the list of registered HCI devices
 * (id + flags per device) to userspace.
 *
 * @arg: user pointer to a struct hci_dev_list_req; its dev_num field on
 *       input caps how many entries are returned.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	struct list_head *p;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to at most two pages of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each(p, &hci_dev_list) {
		struct hci_dev *hdev;

		hdev = list_entry(p, struct hci_dev, list);

		/* Userspace touched the device: cancel pending auto-off */
		hci_del_off_timer(hdev);

		/* Legacy (non-mgmt) userspace gets pairable by default */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	/* copy_to_user returns the number of bytes NOT copied */
	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
909
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for one device
 * and copy it back to userspace.
 *
 * @arg: user pointer to a struct hci_dev_info; dev_id selects the device.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel pending auto-off */
	hci_del_off_timer(hdev);

	/* Legacy (non-mgmt) userspace gets pairable by default */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	/* hdev->name is "hciN", always shorter than di.name */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
950
951/* ---- Interface to HCI drivers ---- */
952
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200953static int hci_rfkill_set_block(void *data, bool blocked)
954{
955 struct hci_dev *hdev = data;
956
957 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
958
959 if (!blocked)
960 return 0;
961
Mat Martineau3b9239a2012-02-16 11:54:30 -0800962 hci_dev_do_close(hdev, 0);
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200963
964 return 0;
965}
966
/* rfkill operations for HCI devices; only blocking is acted upon */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971/* Alloc HCI device */
972struct hci_dev *hci_alloc_dev(void)
973{
974 struct hci_dev *hdev;
975
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200976 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977 if (!hdev)
978 return NULL;
979
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 skb_queue_head_init(&hdev->driver_init);
981
982 return hdev;
983}
984EXPORT_SYMBOL(hci_alloc_dev);
985
/* Free HCI device allocated with hci_alloc_dev() */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any frames the driver queued before registration */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
995
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200996static void hci_power_on(struct work_struct *work)
997{
998 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Inga Stotland5029fc22011-09-12 15:22:52 -0700999 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001000
1001 BT_DBG("%s", hdev->name);
1002
Inga Stotland5029fc22011-09-12 15:22:52 -07001003 err = hci_dev_open(hdev->id);
1004 if (err && err != -EALREADY)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001005 return;
1006
Peter Krystad1fc44072011-08-30 15:38:12 -07001007 if (test_bit(HCI_AUTO_OFF, &hdev->flags) &&
1008 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001009 mod_timer(&hdev->off_timer,
1010 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1011
Peter Krystad1fc44072011-08-30 15:38:12 -07001012 if (test_and_clear_bit(HCI_SETUP, &hdev->flags) &&
1013 hdev->dev_type == HCI_BREDR)
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001014 mgmt_index_added(hdev->id);
1015}
1016
1017static void hci_power_off(struct work_struct *work)
1018{
1019 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
1020
1021 BT_DBG("%s", hdev->name);
1022
1023 hci_dev_close(hdev->id);
1024}
1025
1026static void hci_auto_off(unsigned long data)
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001027{
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001028 struct hci_dev *hdev = (struct hci_dev *) data;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001029
1030 BT_DBG("%s", hdev->name);
1031
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001032 clear_bit(HCI_AUTO_OFF, &hdev->flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001033
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001034 queue_work(hdev->workqueue, &hdev->power_off);
1035}
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001036
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001037void hci_del_off_timer(struct hci_dev *hdev)
1038{
1039 BT_DBG("%s", hdev->name);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001040
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001041 clear_bit(HCI_AUTO_OFF, &hdev->flags);
1042 del_timer(&hdev->off_timer);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001043}
1044
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001045int hci_uuids_clear(struct hci_dev *hdev)
1046{
1047 struct list_head *p, *n;
1048
1049 list_for_each_safe(p, n, &hdev->uuids) {
1050 struct bt_uuid *uuid;
1051
1052 uuid = list_entry(p, struct bt_uuid, list);
1053
1054 list_del(p);
1055 kfree(uuid);
1056 }
1057
1058 return 0;
1059}
1060
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001061int hci_link_keys_clear(struct hci_dev *hdev)
1062{
1063 struct list_head *p, *n;
1064
1065 list_for_each_safe(p, n, &hdev->link_keys) {
1066 struct link_key *key;
1067
1068 key = list_entry(p, struct link_key, list);
1069
1070 list_del(p);
1071 kfree(key);
1072 }
1073
1074 return 0;
1075}
1076
1077struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1078{
1079 struct list_head *p;
1080
1081 list_for_each(p, &hdev->link_keys) {
1082 struct link_key *k;
1083
1084 k = list_entry(p, struct link_key, list);
1085
1086 if (bacmp(bdaddr, &k->bdaddr) == 0)
1087 return k;
1088 }
1089
1090 return NULL;
1091}
1092
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001093struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001094{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001095 struct list_head *p;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001096
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001097 list_for_each(p, &hdev->link_keys) {
1098 struct link_key *k;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001099 struct key_master_id *id;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001101 k = list_entry(p, struct link_key, list);
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001102
Brian Gixcf956772011-10-20 15:18:51 -07001103 if (k->key_type != KEY_TYPE_LTK)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001104 continue;
1105
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001106 if (k->dlen != sizeof(*id))
1107 continue;
1108
1109 id = (void *) &k->data;
1110 if (id->ediv == ediv &&
1111 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1112 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001113 }
1114
1115 return NULL;
1116}
1117EXPORT_SYMBOL(hci_find_ltk);
1118
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001119struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1120 bdaddr_t *bdaddr, u8 type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001121{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001122 struct list_head *p;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001124 list_for_each(p, &hdev->link_keys) {
1125 struct link_key *k;
1126
1127 k = list_entry(p, struct link_key, list);
1128
Brian Gixcf956772011-10-20 15:18:51 -07001129 if ((k->key_type == type) && (bacmp(bdaddr, &k->bdaddr) == 0))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001130 return k;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001131 }
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001132
1133 return NULL;
1134}
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001135EXPORT_SYMBOL(hci_find_link_key_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001136
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001137int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1138 u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001139{
1140 struct link_key *key, *old_key;
Brian Gixa68668b2011-08-11 15:49:36 -07001141 struct hci_conn *conn;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301142 u8 old_key_type;
Brian Gixa68668b2011-08-11 15:49:36 -07001143 u8 bonded = 0;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001144
1145 old_key = hci_find_link_key(hdev, bdaddr);
1146 if (old_key) {
Brian Gixcf956772011-10-20 15:18:51 -07001147 old_key_type = old_key->key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001148 key = old_key;
1149 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001150 old_key_type = 0xff;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001151 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1152 if (!key)
1153 return -ENOMEM;
1154 list_add(&key->list, &hdev->link_keys);
1155 }
1156
1157 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1158
1159 bacpy(&key->bdaddr, bdaddr);
1160 memcpy(key->val, val, 16);
Brian Gixa68668b2011-08-11 15:49:36 -07001161 key->auth = 0x01;
Brian Gixcf956772011-10-20 15:18:51 -07001162 key->key_type = type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001163 key->pin_len = pin_len;
1164
Brian Gixa68668b2011-08-11 15:49:36 -07001165 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, bdaddr);
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301166 /* Store the link key persistently if one of the following is true:
1167 * 1. the remote side is using dedicated bonding since in that case
1168 * also the local requirements are set to dedicated bonding
1169 * 2. the local side had dedicated bonding as a requirement
1170 * 3. this is a legacy link key
1171 * 4. this is a changed combination key and there was a previously
1172 * stored one
1173 * If none of the above match only keep the link key around for
1174 * this connection and set the temporary flag for the device.
1175 */
Johan Hedberg4748fed2011-04-28 11:29:02 -07001176
Brian Gixdfdd9362011-08-18 09:58:02 -07001177 if (conn) {
Srinivas Krovvidi9ff51452011-09-27 19:25:02 +05301178 if ((conn->remote_auth > 0x01) ||
1179 (conn->auth_initiator && conn->auth_type > 0x01) ||
Brian Gixcf956772011-10-20 15:18:51 -07001180 (key->key_type < 0x03) ||
1181 (key->key_type == 0x06 && old_key_type != 0xff))
Brian Gixdfdd9362011-08-18 09:58:02 -07001182 bonded = 1;
1183 }
Johan Hedberg4df378a2011-04-28 11:29:03 -07001184
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001185 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001186 mgmt_new_key(hdev->id, key, bonded);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001188 if (type == 0x06)
Brian Gixcf956772011-10-20 15:18:51 -07001189 key->key_type = old_key_type;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001190
1191 return 0;
1192}
1193
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001194int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
Brian Gixcf956772011-10-20 15:18:51 -07001195 u8 addr_type, u8 key_size, u8 auth,
1196 __le16 ediv, u8 rand[8], u8 ltk[16])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001197{
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001198 struct link_key *key, *old_key;
1199 struct key_master_id *id;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001200
Brian Gixcf956772011-10-20 15:18:51 -07001201 BT_DBG("%s Auth: %2.2X addr %s type: %d", hdev->name, auth,
1202 batostr(bdaddr), addr_type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001204 old_key = hci_find_link_key_type(hdev, bdaddr, KEY_TYPE_LTK);
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001205 if (old_key) {
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001206 key = old_key;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001207 } else {
1208 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001209 if (!key)
1210 return -ENOMEM;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001211 list_add(&key->list, &hdev->link_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001212 }
1213
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001214 key->dlen = sizeof(*id);
1215
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001216 bacpy(&key->bdaddr, bdaddr);
Brian Gixcf956772011-10-20 15:18:51 -07001217 key->addr_type = addr_type;
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001218 memcpy(key->val, ltk, sizeof(key->val));
Brian Gixcf956772011-10-20 15:18:51 -07001219 key->key_type = KEY_TYPE_LTK;
Vinicius Costa Gomes1fa2de32011-07-08 18:31:45 -03001220 key->pin_len = key_size;
Brian Gixa68668b2011-08-11 15:49:36 -07001221 key->auth = auth;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001222
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001223 id = (void *) &key->data;
1224 id->ediv = ediv;
1225 memcpy(id->rand, rand, sizeof(id->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226
Vinicius Costa Gomes7f9004d2011-07-07 18:59:36 -03001227 if (new_key)
Brian Gixa68668b2011-08-11 15:49:36 -07001228 mgmt_new_key(hdev->id, key, auth & 0x01);
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001229
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230 return 0;
1231}
1232
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001233int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1234{
1235 struct link_key *key;
1236
1237 key = hci_find_link_key(hdev, bdaddr);
1238 if (!key)
1239 return -ENOENT;
1240
1241 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1242
1243 list_del(&key->list);
1244 kfree(key);
1245
1246 return 0;
1247}
1248
Ville Tervo6bd32322011-02-16 16:32:41 +02001249/* HCI command timer function */
1250static void hci_cmd_timer(unsigned long arg)
1251{
1252 struct hci_dev *hdev = (void *) arg;
1253
1254 BT_ERR("%s command tx timeout", hdev->name);
1255 atomic_set(&hdev->cmd_cnt, 1);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001256 clear_bit(HCI_RESET, &hdev->flags);
Ville Tervo6bd32322011-02-16 16:32:41 +02001257 tasklet_schedule(&hdev->cmd_task);
1258}
1259
Szymon Janc2763eda2011-03-22 13:12:22 +01001260struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1261 bdaddr_t *bdaddr)
1262{
1263 struct oob_data *data;
1264
1265 list_for_each_entry(data, &hdev->remote_oob_data, list)
1266 if (bacmp(bdaddr, &data->bdaddr) == 0)
1267 return data;
1268
1269 return NULL;
1270}
1271
1272int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1273{
1274 struct oob_data *data;
1275
1276 data = hci_find_remote_oob_data(hdev, bdaddr);
1277 if (!data)
1278 return -ENOENT;
1279
1280 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1281
1282 list_del(&data->list);
1283 kfree(data);
1284
1285 return 0;
1286}
1287
1288int hci_remote_oob_data_clear(struct hci_dev *hdev)
1289{
1290 struct oob_data *data, *n;
1291
1292 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1293 list_del(&data->list);
1294 kfree(data);
1295 }
1296
1297 return 0;
1298}
1299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300static void hci_adv_clear(unsigned long arg)
1301{
1302 struct hci_dev *hdev = (void *) arg;
1303
1304 hci_adv_entries_clear(hdev);
1305}
1306
1307int hci_adv_entries_clear(struct hci_dev *hdev)
1308{
1309 struct list_head *p, *n;
1310
Brian Gixa68668b2011-08-11 15:49:36 -07001311 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 write_lock_bh(&hdev->adv_entries_lock);
1313
1314 list_for_each_safe(p, n, &hdev->adv_entries) {
1315 struct adv_entry *entry;
1316
1317 entry = list_entry(p, struct adv_entry, list);
1318
1319 list_del(p);
1320 kfree(entry);
1321 }
1322
1323 write_unlock_bh(&hdev->adv_entries_lock);
1324
1325 return 0;
1326}
1327
1328struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1329{
1330 struct list_head *p;
1331 struct adv_entry *res = NULL;
1332
Brian Gixa68668b2011-08-11 15:49:36 -07001333 BT_DBG("");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001334 read_lock_bh(&hdev->adv_entries_lock);
1335
1336 list_for_each(p, &hdev->adv_entries) {
1337 struct adv_entry *entry;
1338
1339 entry = list_entry(p, struct adv_entry, list);
1340
1341 if (bacmp(bdaddr, &entry->bdaddr) == 0) {
1342 res = entry;
1343 goto out;
1344 }
1345 }
1346out:
1347 read_unlock_bh(&hdev->adv_entries_lock);
1348 return res;
1349}
1350
1351static inline int is_connectable_adv(u8 evt_type)
1352{
1353 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1354 return 1;
1355
1356 return 0;
1357}
1358
Szymon Janc2763eda2011-03-22 13:12:22 +01001359int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1360 u8 *randomizer)
1361{
1362 struct oob_data *data;
1363
1364 data = hci_find_remote_oob_data(hdev, bdaddr);
1365
1366 if (!data) {
1367 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1368 if (!data)
1369 return -ENOMEM;
1370
1371 bacpy(&data->bdaddr, bdaddr);
1372 list_add(&data->list, &hdev->remote_oob_data);
1373 }
1374
1375 memcpy(data->hash, hash, sizeof(data->hash));
1376 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1377
1378 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1379
1380 return 0;
1381}
1382
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001383int hci_add_adv_entry(struct hci_dev *hdev,
1384 struct hci_ev_le_advertising_info *ev)
Andre Guedes76c86862011-05-26 16:23:50 -03001385{
1386 struct adv_entry *entry;
Brian Gixfdd38922011-09-28 16:23:48 -07001387 u8 flags = 0;
1388 int i;
Andre Guedes76c86862011-05-26 16:23:50 -03001389
Brian Gixa68668b2011-08-11 15:49:36 -07001390 BT_DBG("");
Andre Guedes76c86862011-05-26 16:23:50 -03001391
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001392 if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001393 return -EINVAL;
1394
Brian Gixfdd38922011-09-28 16:23:48 -07001395 if (ev->data && ev->length) {
1396 for (i = 0; (i + 2) < ev->length; i++)
1397 if (ev->data[i+1] == 0x01) {
1398 flags = ev->data[i+2];
1399 BT_DBG("flags: %2.2x", flags);
1400 break;
1401 } else {
1402 i += ev->data[i];
1403 }
1404 }
1405
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001406 entry = hci_find_adv_entry(hdev, &ev->bdaddr);
Andre Guedes76c86862011-05-26 16:23:50 -03001407 /* Only new entries should be added to adv_entries. So, if
1408 * bdaddr was found, don't add it. */
Brian Gixfdd38922011-09-28 16:23:48 -07001409 if (entry) {
1410 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001411 return 0;
Brian Gixfdd38922011-09-28 16:23:48 -07001412 }
Andre Guedes76c86862011-05-26 16:23:50 -03001413
Andre Guedes6c77c8c2011-05-26 16:23:50 -03001414 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
Andre Guedes76c86862011-05-26 16:23:50 -03001415 if (!entry)
1416 return -ENOMEM;
1417
1418 bacpy(&entry->bdaddr, &ev->bdaddr);
1419 entry->bdaddr_type = ev->bdaddr_type;
Brian Gixfdd38922011-09-28 16:23:48 -07001420 entry->flags = flags;
Andre Guedes76c86862011-05-26 16:23:50 -03001421
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001422 write_lock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001423 list_add(&entry->list, &hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001424 write_unlock(&hdev->adv_entries_lock);
Andre Guedes76c86862011-05-26 16:23:50 -03001425
1426 return 0;
1427}
1428
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001429static struct crypto_blkcipher *alloc_cypher(void)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001430{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001431 if (enable_smp)
1432 return crypto_alloc_blkcipher("ecb(aes)", 0, CRYPTO_ALG_ASYNC);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001433
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001434 return ERR_PTR(-ENOTSUPP);
Andre Guedes28b75a82012-02-03 17:48:00 -03001435}
1436
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437/* Register HCI device */
1438int hci_register_dev(struct hci_dev *hdev)
1439{
1440 struct list_head *head = &hci_dev_list, *p;
Peter Krystad462bf762011-09-19 14:20:20 -07001441 int i, id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001443 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1444 hdev->bus, hdev->owner);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445
1446 if (!hdev->open || !hdev->close || !hdev->destruct)
1447 return -EINVAL;
1448
Mat Martineau08add512011-11-02 16:18:36 -07001449 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 write_lock_bh(&hci_dev_list_lock);
1452
1453 /* Find first available device id */
1454 list_for_each(p, &hci_dev_list) {
1455 if (list_entry(p, struct hci_dev, list)->id != id)
1456 break;
1457 head = p; id++;
1458 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001459
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 sprintf(hdev->name, "hci%d", id);
1461 hdev->id = id;
1462 list_add(&hdev->list, head);
1463
1464 atomic_set(&hdev->refcnt, 1);
1465 spin_lock_init(&hdev->lock);
1466
1467 hdev->flags = 0;
1468 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001469 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001471 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472
Marcel Holtmann04837f62006-07-03 10:02:33 +02001473 hdev->idle_timeout = 0;
1474 hdev->sniff_max_interval = 800;
1475 hdev->sniff_min_interval = 80;
1476
Bhasker Neti8bfd3c52013-02-25 22:07:43 +05301477 set_bit(HCI_SETUP, &hdev->flags);
1478
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001479 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1481 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1482
1483 skb_queue_head_init(&hdev->rx_q);
1484 skb_queue_head_init(&hdev->cmd_q);
1485 skb_queue_head_init(&hdev->raw_q);
1486
Ville Tervo6bd32322011-02-16 16:32:41 +02001487 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
Brian Gix568dde92012-01-11 16:18:04 -08001488 setup_timer(&hdev->disco_timer, mgmt_disco_timeout,
1489 (unsigned long) hdev);
1490 setup_timer(&hdev->disco_le_timer, mgmt_disco_le_timeout,
1491 (unsigned long) hdev);
Ville Tervo6bd32322011-02-16 16:32:41 +02001492
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301493 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001494 hdev->reassembly[i] = NULL;
1495
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001497 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498
1499 inquiry_cache_init(hdev);
1500
1501 hci_conn_hash_init(hdev);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001502 hci_chan_list_init(hdev);
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001503
David Millerea4bd8b2010-07-30 21:54:49 -07001504 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001505
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001506 INIT_LIST_HEAD(&hdev->uuids);
1507
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001508 INIT_LIST_HEAD(&hdev->link_keys);
1509
Szymon Janc2763eda2011-03-22 13:12:22 +01001510 INIT_LIST_HEAD(&hdev->remote_oob_data);
1511
Andre Guedes76c86862011-05-26 16:23:50 -03001512 INIT_LIST_HEAD(&hdev->adv_entries);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001513 rwlock_init(&hdev->adv_entries_lock);
1514 setup_timer(&hdev->adv_timer, hci_adv_clear, (unsigned long) hdev);
Andre Guedes76c86862011-05-26 16:23:50 -03001515
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001516 INIT_WORK(&hdev->power_on, hci_power_on);
1517 INIT_WORK(&hdev->power_off, hci_power_off);
1518 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001519
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1521
1522 atomic_set(&hdev->promisc, 0);
1523
1524 write_unlock_bh(&hci_dev_list_lock);
Andre Guedes28b75a82012-02-03 17:48:00 -03001525
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001526 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1527 if (!hdev->workqueue)
1528 goto nomem;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001529
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001530 hdev->tfm = alloc_cypher();
Vinicius Costa Gomes09fabbc2011-06-09 18:50:43 -03001531 if (IS_ERR(hdev->tfm))
1532 BT_INFO("Failed to load transform for ecb(aes): %ld",
1533 PTR_ERR(hdev->tfm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534
1535 hci_register_sysfs(hdev);
1536
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001537 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1538 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1539 if (hdev->rfkill) {
1540 if (rfkill_register(hdev->rfkill) < 0) {
1541 rfkill_destroy(hdev->rfkill);
1542 hdev->rfkill = NULL;
1543 }
1544 }
1545
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001546 set_bit(HCI_AUTO_OFF, &hdev->flags);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001547 queue_work(hdev->workqueue, &hdev->power_on);
1548
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 hci_notify(hdev, HCI_DEV_REG);
1550
1551 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001552
1553nomem:
1554 write_lock_bh(&hci_dev_list_lock);
1555 list_del(&hdev->list);
1556 write_unlock_bh(&hci_dev_list_lock);
1557
1558 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559}
1560EXPORT_SYMBOL(hci_register_dev);
1561
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up: unlinks the device,
 * closes it, stops timers and the workqueue, notifies mgmt, and frees
 * all cached state.  Always returns 0.
 */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev, hdev->bus == HCI_SMD);

	/* kfree_skb(NULL) is a no-op, so unused slots are fine */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt only about fully set-up BR/EDR controllers */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
		!test_bit(HCI_SETUP, &hdev->flags) &&
		hdev->dev_type == HCI_BREDR) {
		hci_dev_lock_bh(hdev);
		mgmt_index_removed(hdev->id);
		hci_dev_unlock_bh(hdev);
	}

	/* hdev->tfm may be an ERR_PTR if cipher allocation failed */
	if (!IS_ERR(hdev->tfm))
		crypto_free_blkcipher(hdev->tfm);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Disable all timers */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);
	del_timer(&hdev->cmd_timer);
	del_timer(&hdev->disco_timer);
	del_timer(&hdev->disco_le_timer);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached per-device state under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the registration reference taken in hci_register_dev() */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1620
/* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; always returns 0 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1628
/* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; always returns 0 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1636
Marcel Holtmann76bca882009-11-18 00:40:39 +01001637/* Receive frame from HCI drivers */
1638int hci_recv_frame(struct sk_buff *skb)
1639{
1640 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1641 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1642 && !test_bit(HCI_INIT, &hdev->flags))) {
1643 kfree_skb(skb);
1644 return -ENXIO;
1645 }
1646
1647 /* Incomming skb */
1648 bt_cb(skb)->incoming = 1;
1649
1650 /* Time stamp */
1651 __net_timestamp(skb);
1652
1653 /* Queue frame for rx task */
1654 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001655 tasklet_schedule(&hdev->rx_task);
1656
Marcel Holtmann76bca882009-11-18 00:40:39 +01001657 return 0;
1658}
1659EXPORT_SYMBOL(hci_recv_frame);
1660
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @hdev:  device owning the reassembly slots
 * @type:  HCI packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT)
 * @data:  input bytes (void pointer arithmetic below relies on the GCC
 *         extension treating sizeof(void) as 1)
 * @count: number of bytes available in @data
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno
 * (-EILSEQ for bad type/index, -ENOMEM on allocation or oversize frame).
 * A completed frame is pushed to hci_recv_frame() and the slot cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial frame in this slot: allocate a buffer sized
		 * for the worst case of this packet type and expect the
		 * fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed
		 * before the next parsing step. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1769
Marcel Holtmannef222012007-07-11 06:42:04 +02001770int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1771{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301772 int rem = 0;
1773
Marcel Holtmannef222012007-07-11 06:42:04 +02001774 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1775 return -EILSEQ;
1776
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001777 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001778 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301779 if (rem < 0)
1780 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001781
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301782 data += (count - rem);
1783 count = rem;
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001784 };
Marcel Holtmannef222012007-07-11 06:42:04 +02001785
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301786 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001787}
1788EXPORT_SYMBOL(hci_recv_fragment);
1789
/* Single reassembly slot shared by all packet types for stream
 * (UART-style) transports, where the packet type indicator byte is
 * in-band at the start of every frame. */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw transport byte stream.
 *
 * When no partial frame is pending, the first byte of @data is the HCI
 * packet type indicator and is consumed here before handing the rest to
 * hci_reassembly(); otherwise the pending frame's type is reused.
 *
 * Returns leftover byte count (0 when fully consumed) or negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	};

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1824
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825/* ---- Interface to upper protocols ---- */
1826
1827/* Register/Unregister protocols.
1828 * hci_task_lock is used to ensure that no tasks are running. */
1829int hci_register_proto(struct hci_proto *hp)
1830{
1831 int err = 0;
1832
1833 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1834
1835 if (hp->id >= HCI_MAX_PROTO)
1836 return -EINVAL;
1837
1838 write_lock_bh(&hci_task_lock);
1839
1840 if (!hci_proto[hp->id])
1841 hci_proto[hp->id] = hp;
1842 else
1843 err = -EEXIST;
1844
1845 write_unlock_bh(&hci_task_lock);
1846
1847 return err;
1848}
1849EXPORT_SYMBOL(hci_register_proto);
1850
1851int hci_unregister_proto(struct hci_proto *hp)
1852{
1853 int err = 0;
1854
1855 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1856
1857 if (hp->id >= HCI_MAX_PROTO)
1858 return -EINVAL;
1859
1860 write_lock_bh(&hci_task_lock);
1861
1862 if (hci_proto[hp->id])
1863 hci_proto[hp->id] = NULL;
1864 else
1865 err = -ENOENT;
1866
1867 write_unlock_bh(&hci_task_lock);
1868
1869 return err;
1870}
1871EXPORT_SYMBOL(hci_unregister_proto);
1872
/* Add an HCI event callback to the global hci_cb_list. Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1884
/* Remove an HCI event callback from hci_cb_list. Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1896
/* Add an AMP manager callback set to amp_mgr_cb_list. Always succeeds. */
int hci_register_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_add(&cb->list, &amp_mgr_cb_list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_amp);
1908
/* Remove an AMP manager callback set from amp_mgr_cb_list. Always succeeds. */
int hci_unregister_amp(struct amp_mgr_cb *cb)
{
	BT_DBG("%p", cb);

	write_lock_bh(&amp_mgr_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&amp_mgr_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_amp);
1920
/* Fan out an HCI Command Complete event for @opcode to every registered
 * AMP manager that installed an amp_cmd_complete_event handler. */
void hci_amp_cmd_complete(struct hci_dev *hdev, __u16 opcode,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x", opcode);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_complete_event)
			cb->amp_cmd_complete_event(hdev, opcode, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1935
/* Fan out an HCI Command Status event (@opcode, @status) to every
 * registered AMP manager that installed an amp_cmd_status_event handler. */
void hci_amp_cmd_status(struct hci_dev *hdev, __u16 opcode, __u8 status)
{
	struct amp_mgr_cb *cb;

	BT_DBG("opcode 0x%x, status %d", opcode, status);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_cmd_status_event)
			cb->amp_cmd_status_event(hdev, opcode, status);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1949
/* Fan out a raw HCI event (@ev_code) to every registered AMP manager
 * that installed an amp_event handler. */
void hci_amp_event_packet(struct hci_dev *hdev, __u8 ev_code,
			struct sk_buff *skb)
{
	struct amp_mgr_cb *cb;

	BT_DBG("ev_code 0x%x", ev_code);

	read_lock_bh(&amp_mgr_cb_list_lock);
	list_for_each_entry(cb, &amp_mgr_cb_list, list) {
		if (cb->amp_event)
			cb->amp_event(hdev, ev_code, skb);
	}
	read_unlock_bh(&amp_mgr_cb_list_lock);
}
1964
/* Hand one outgoing frame to the driver (hdev->send).
 *
 * skb->dev carries the target hci_dev. In promiscuous mode a timestamped
 * copy goes to monitoring sockets first. Consumes the skb either way;
 * returns the driver's result or -ENODEV when no device is attached. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	hci_notify(hdev, HCI_DEV_WRITE);
	return hdev->send(skb);
}
1989
/* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param) and queue
 * it on hdev->cmd_q for the command tasklet, which serializes commands
 * against the controller's command-credit count.
 *
 * Returns 0 on queue, -ENOMEM if the skb allocation fails. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init so the
	 * init sequence can be resumed from the command-complete handler. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
EXPORT_SYMBOL(hci_send_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
2027/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002028void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
2030 struct hci_command_hdr *hdr;
2031
2032 if (!hdev->sent_cmd)
2033 return NULL;
2034
2035 hdr = (void *) hdev->sent_cmd->data;
2036
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002037 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 return NULL;
2039
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002040 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
2042 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2043}
2044
/* Send ACL data */
/* Prepend an ACL data header (handle + packet-boundary/broadcast flags,
 * little-endian length) to @skb, leaving the transport header pointing
 * at the new ACL header. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2057
/* Queue ACL data on a connection and kick the TX tasklet.
 *
 * For BR/EDR the ACL handle comes from @conn; otherwise (AMP) the
 * logical-link handle comes from @chan. A fragmented skb (frag_list)
 * is flattened into the queue atomically, with continuation fragments
 * re-flagged ACL_CONT.
 *
 * NOTE(review): continuation fragments always use conn->handle even on
 * the non-BREDR path — confirm whether chan->ll_handle was intended. */
void hci_send_acl(struct hci_conn *conn, struct hci_chan *chan,
		struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p chan %p flags 0x%x", hdev->name, conn, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	if (hdev->dev_type == HCI_BREDR)
		hci_add_acl_hdr(skb, conn->handle, flags);
	else
		hci_add_acl_hdr(skb, chan->ll_handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);
		/* Fragments after the first are continuations. */
		flags &= ~ACL_PB_MASK;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
2109
/* Send SCO data */
/* Prepend the SCO header (handle + 8-bit length) to @skb, queue it on
 * the connection's data queue and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2132
2133/* ---- HCI TX task (outgoing data) ---- */
Srinivas Krovvidi672dff32012-06-29 19:37:45 +05302134/* HCI ACL Connection scheduler */
2135static inline struct hci_conn *hci_low_sent_acl(struct hci_dev *hdev,
2136 int *quote)
2137{
2138 struct hci_conn_hash *h = &hdev->conn_hash;
2139 struct hci_conn *conn = NULL;
2140 int num = 0, min = ~0, conn_num = 0;
2141 struct list_head *p;
2142
2143 /* We don't have to lock device here. Connections are always
2144 * added and removed with TX task disabled. */
2145 list_for_each(p, &h->list) {
2146 struct hci_conn *c;
2147 c = list_entry(p, struct hci_conn, list);
2148 if (c->type == ACL_LINK)
2149 conn_num++;
2150
2151 if (skb_queue_empty(&c->data_q))
2152 continue;
2153
2154 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2155 continue;
2156
2157 num++;
2158
2159 if (c->sent < min) {
2160 min = c->sent;
2161 conn = c;
2162 }
2163 }
2164
2165 if (conn) {
2166 int cnt, q;
2167 cnt = hdev->acl_cnt;
2168 q = cnt / num;
2169 *quote = q ? q : 1;
2170 } else
2171 *quote = 0;
2172
2173 if ((*quote == hdev->acl_cnt) &&
2174 (conn->sent == (hdev->acl_pkts - 1)) &&
2175 (conn_num > 1)) {
2176 *quote = 0;
2177 conn = NULL;
2178 }
2179
2180 BT_DBG("conn %p quote %d", conn, *quote);
2181 return conn;
2182}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
/* HCI Connection scheduler */
/* Generic link scheduler: among connections of @type with queued data in
 * a connected/config state, pick the one with the fewest packets in
 * flight and set *quote to its fair share of the link-type's credits
 * (minimum 1). Returns NULL with *quote = 0 when none is eligible. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Credit pool depends on the link type; LE falls back to
		 * the ACL pool when the controller has no LE buffers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2239
Ville Tervobae1f5d2011-02-10 22:38:53 -03002240static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241{
2242 struct hci_conn_hash *h = &hdev->conn_hash;
2243 struct list_head *p;
2244 struct hci_conn *c;
2245
Ville Tervobae1f5d2011-02-10 22:38:53 -03002246 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002247
2248 /* Kill stalled connections */
2249 list_for_each(p, &h->list) {
2250 c = list_entry(p, struct hci_conn, list);
Ville Tervobae1f5d2011-02-10 22:38:53 -03002251 if (c->type == type && c->sent) {
2252 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 hdev->name, batostr(&c->dst));
2254 hci_acl_disconn(c, 0x13);
2255 }
2256 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002257}
2258
2259static inline void hci_sched_acl(struct hci_dev *hdev)
2260{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 struct hci_conn *conn;
2262 struct sk_buff *skb;
2263 int quote;
2264
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002265 BT_DBG("%s", hdev->name);
2266
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 if (!test_bit(HCI_RAW, &hdev->flags)) {
2268 /* ACL tx timeout must be longer than maximum
2269 * link supervision timeout (40.9 seconds) */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002270 if (hdev->acl_cnt <= 0 &&
2271 time_after(jiffies, hdev->acl_last_tx + HZ * 45))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002272 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 while (hdev->acl_cnt > 0 &&
Srinivas Krovvidi672dff32012-06-29 19:37:45 +05302276 ((conn = hci_low_sent_acl(hdev, &quote)) != NULL)) {
2277
2278 while (quote > 0 &&
2279 (skb = skb_dequeue(&conn->data_q))) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002280 int count = 1;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002284 if (hdev->flow_ctl_mode ==
2285 HCI_BLOCK_BASED_FLOW_CTL_MODE)
2286 /* Calculate count of blocks used by
2287 * this packet
2288 */
2289 count = ((skb->len - HCI_ACL_HDR_SIZE - 1) /
2290 hdev->data_block_len) + 1;
2291
2292 if (count > hdev->acl_cnt)
2293 return;
2294
Jaikumar Ganesh514abe62011-05-23 18:06:04 -07002295 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002296
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297 hci_send_frame(skb);
2298 hdev->acl_last_tx = jiffies;
2299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300 hdev->acl_cnt -= count;
2301 quote -= count;
2302
2303 conn->sent += count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 }
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002305 }
2306}
2307
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308/* Schedule SCO */
2309static inline void hci_sched_sco(struct hci_dev *hdev)
2310{
2311 struct hci_conn *conn;
2312 struct sk_buff *skb;
2313 int quote;
2314
2315 BT_DBG("%s", hdev->name);
2316
2317 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2318 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2319 BT_DBG("skb %p len %d", skb, skb->len);
2320 hci_send_frame(skb);
2321
2322 conn->sent++;
2323 if (conn->sent == ~0)
2324 conn->sent = 0;
2325 }
2326 }
2327}
2328
/* Drain eSCO data queues; identical policy to hci_sched_sco but for
 * ESCO_LINK connections (shares the SCO credit pool). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2348
/* Drain LE data queues. Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow the ACL credit pool; the updated count is
 * written back to the pool that was used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
			time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
2382
/* TX tasklet: run all per-link-type schedulers, then flush raw packets.
 * hci_task_lock (read) excludes protocol (un)registration meanwhile. */
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
2409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002410/* ----- HCI RX task (incoming data proccessing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
/* ACL data packet */
/* Parse an inbound ACL packet's handle/flags, look up the connection and
 * deliver to L2CAP; frees the skb when no connection or no handler. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs a 12-bit handle with PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			/* Ownership of skb passes to the protocol. */
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2451
/* SCO data packet */
/* Parse an inbound SCO packet's handle, look up the connection and
 * deliver to the SCO protocol; frees the skb when no connection or no
 * handler. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			/* Ownership of skb passes to the protocol. */
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2487
/* RX tasklet: drain hdev->rx_q and dispatch each frame by packet type.
 * Copies go to promiscuous sockets first; in HCI_RAW mode frames are
 * consumed without processing, and during HCI_INIT data packets are
 * dropped (only events are processed). */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2542
/* Command tasklet: send the next queued HCI command if the controller
 * has a free command credit (hdev->cmd_cnt). A clone is kept in
 * hdev->sent_cmd so hci_sent_cmd_data() can recover its parameters,
 * and cmd_timer arms a response timeout. On clone-allocation failure
 * the command is requeued and the tasklet rescheduled. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous command's saved copy. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			mod_timer(&hdev->cmd_timer,
				jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002570
/* Runtime switch for Security Manager Protocol (LE pairing) support;
 * the enable_smp variable is defined earlier in this file — its default
 * is not visible here, confirm at the definition. */
module_param(enable_smp, bool, 0644);
MODULE_PARM_DESC(enable_smp, "Enable SMP support (LE only)");