blob: 87ff7ffdb36731818b3bab088fcffc05171ab62e [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Fabio Estevam8b281b92012-01-10 18:33:50 -020057bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058
Marcel Holtmannb78752c2010-08-08 23:06:53 -040059static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020060static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020061static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063/* HCI device list */
64LIST_HEAD(hci_dev_list);
65DEFINE_RWLOCK(hci_dev_list_lock);
66
67/* HCI callback list */
68LIST_HEAD(hci_cb_list);
69DEFINE_RWLOCK(hci_cb_list_lock);
70
Linus Torvalds1da177e2005-04-16 15:20:36 -070071/* ---- HCI notifications ---- */
72
/* Forward a device-level event to the HCI socket layer so that
 * listening sockets learn about device state changes. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
77
78/* ---- HCI requests ---- */
79
Johan Hedberg23bb5762010-12-21 23:01:27 +020080void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070081{
Johan Hedberg23bb5762010-12-21 23:01:27 +020082 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
83
Johan Hedberga5040ef2011-01-10 13:28:59 +020084 /* If this is the init phase check if the completed command matches
85 * the last init command, and if not just return.
86 */
87 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +020088 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -070089
90 if (hdev->req_status == HCI_REQ_PEND) {
91 hdev->req_result = result;
92 hdev->req_status = HCI_REQ_DONE;
93 wake_up_interruptible(&hdev->req_wait_q);
94 }
95}
96
97static void hci_req_cancel(struct hci_dev *hdev, int err)
98{
99 BT_DBG("%s err 0x%2.2x", hdev->name, err);
100
101 if (hdev->req_status == HCI_REQ_PEND) {
102 hdev->req_result = err;
103 hdev->req_status = HCI_REQ_CANCELED;
104 wake_up_interruptible(&hdev->req_wait_q);
105 }
106}
107
108/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900109static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100110 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700111{
112 DECLARE_WAITQUEUE(wait, current);
113 int err = 0;
114
115 BT_DBG("%s start", hdev->name);
116
117 hdev->req_status = HCI_REQ_PEND;
118
119 add_wait_queue(&hdev->req_wait_q, &wait);
120 set_current_state(TASK_INTERRUPTIBLE);
121
122 req(hdev, opt);
123 schedule_timeout(timeout);
124
125 remove_wait_queue(&hdev->req_wait_q, &wait);
126
127 if (signal_pending(current))
128 return -EINTR;
129
130 switch (hdev->req_status) {
131 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700132 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700133 break;
134
135 case HCI_REQ_CANCELED:
136 err = -hdev->req_result;
137 break;
138
139 default:
140 err = -ETIMEDOUT;
141 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700142 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700143
Johan Hedberga5040ef2011-01-10 13:28:59 +0200144 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
146 BT_DBG("%s end: err %d", hdev->name, err);
147
148 return err;
149}
150
151static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100152 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700153{
154 int ret;
155
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200156 if (!test_bit(HCI_UP, &hdev->flags))
157 return -ENETDOWN;
158
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 /* Serialize all requests */
160 hci_req_lock(hdev);
161 ret = __hci_request(hdev, req, opt, timeout);
162 hci_req_unlock(hdev);
163
164 return ret;
165}
166
167static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
168{
169 BT_DBG("%s %ld", hdev->name, opt);
170
171 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300172 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200173 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174}
175
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200176static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200178 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800179 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200180 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200182 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
183
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184 /* Mandatory initialization */
185
186 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300187 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200188 set_bit(HCI_RESET, &hdev->flags);
189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
192 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200195 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200199 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200
Linus Torvalds1da177e2005-04-16 15:20:36 -0700201 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200202 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
203
204 /* Read Class of Device */
205 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
206
207 /* Read Local Name */
208 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
213 /* Optional initialization */
214
215 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200216 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700220 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200221 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200222
223 bacpy(&cp.bdaddr, BDADDR_ANY);
224 cp.delete_all = 1;
225 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226}
227
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200228static void amp_init(struct hci_dev *hdev)
229{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200230 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
231
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200232 /* Reset */
233 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
234
235 /* Read Local Version */
236 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
237}
238
239static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240{
241 struct sk_buff *skb;
242
243 BT_DBG("%s %ld", hdev->name, opt);
244
245 /* Driver initialization */
246
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
251
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
254 }
255 skb_queue_purge(&hdev->driver_init);
256
257 switch (hdev->dev_type) {
258 case HCI_BREDR:
259 bredr_init(hdev);
260 break;
261
262 case HCI_AMP:
263 amp_init(hdev);
264 break;
265
266 default:
267 BT_ERR("Unknown device type %d", hdev->dev_type);
268 break;
269 }
270
271}
272
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300273static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
274{
275 BT_DBG("%s", hdev->name);
276
277 /* Read LE buffer size */
278 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
279}
280
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282{
283 __u8 scan = opt;
284
285 BT_DBG("%s %x", hdev->name, scan);
286
287 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200288 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289}
290
291static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292{
293 __u8 auth = opt;
294
295 BT_DBG("%s %x", hdev->name, auth);
296
297 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200298 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299}
300
301static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302{
303 __u8 encrypt = opt;
304
305 BT_DBG("%s %x", hdev->name, encrypt);
306
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200307 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200308 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309}
310
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200311static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
312{
313 __le16 policy = cpu_to_le16(opt);
314
Marcel Holtmanna418b892008-11-30 12:17:28 +0100315 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200316
317 /* Default link policy */
318 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
319}
320
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900321/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322 * Device is held on return. */
323struct hci_dev *hci_dev_get(int index)
324{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200325 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326
327 BT_DBG("%d", index);
328
329 if (index < 0)
330 return NULL;
331
332 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200333 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 if (d->id == index) {
335 hdev = hci_dev_hold(d);
336 break;
337 }
338 }
339 read_unlock(&hci_dev_list_lock);
340 return hdev;
341}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342
343/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200344
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200345bool hci_discovery_active(struct hci_dev *hdev)
346{
347 struct discovery_state *discov = &hdev->discovery;
348
Andre Guedes6fbe1952012-02-03 17:47:58 -0300349 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300350 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300351 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200352 return true;
353
Andre Guedes6fbe1952012-02-03 17:47:58 -0300354 default:
355 return false;
356 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200357}
358
Johan Hedbergff9ef572012-01-04 14:23:45 +0200359void hci_discovery_set_state(struct hci_dev *hdev, int state)
360{
361 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
362
363 if (hdev->discovery.state == state)
364 return;
365
366 switch (state) {
367 case DISCOVERY_STOPPED:
Andre Guedes4aab14e2012-02-17 20:39:36 -0300368 hdev->discovery.type = 0;
369
Andre Guedes7b99b652012-02-13 15:41:02 -0300370 if (hdev->discovery.state != DISCOVERY_STARTING)
371 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200372 break;
373 case DISCOVERY_STARTING:
374 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300375 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200376 mgmt_discovering(hdev, 1);
377 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200378 case DISCOVERY_RESOLVING:
379 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200380 case DISCOVERY_STOPPING:
381 break;
382 }
383
384 hdev->discovery.state = state;
385}
386
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387static void inquiry_cache_flush(struct hci_dev *hdev)
388{
Johan Hedberg30883512012-01-04 14:16:21 +0200389 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200390 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391
Johan Hedberg561aafb2012-01-04 13:31:59 +0200392 list_for_each_entry_safe(p, n, &cache->all, all) {
393 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200394 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200396
397 INIT_LIST_HEAD(&cache->unknown);
398 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200399 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400}
401
402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
403{
Johan Hedberg30883512012-01-04 14:16:21 +0200404 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 struct inquiry_entry *e;
406
407 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 return e;
412 }
413
414 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415}
416
Johan Hedberg561aafb2012-01-04 13:31:59 +0200417struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
418 bdaddr_t *bdaddr)
419{
Johan Hedberg30883512012-01-04 14:16:21 +0200420 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
425 list_for_each_entry(e, &cache->unknown, list) {
426 if (!bacmp(&e->data.bdaddr, bdaddr))
427 return e;
428 }
429
430 return NULL;
431}
432
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200433struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
434 bdaddr_t *bdaddr,
435 int state)
436{
437 struct discovery_state *cache = &hdev->discovery;
438 struct inquiry_entry *e;
439
440 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
441
442 list_for_each_entry(e, &cache->resolve, list) {
443 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
444 return e;
445 if (!bacmp(&e->data.bdaddr, bdaddr))
446 return e;
447 }
448
449 return NULL;
450}
451
Johan Hedberga3d4e202012-01-09 00:53:02 +0200452void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
453 struct inquiry_entry *ie)
454{
455 struct discovery_state *cache = &hdev->discovery;
456 struct list_head *pos = &cache->resolve;
457 struct inquiry_entry *p;
458
459 list_del(&ie->list);
460
461 list_for_each_entry(p, &cache->resolve, list) {
462 if (p->name_state != NAME_PENDING &&
463 abs(p->data.rssi) >= abs(ie->data.rssi))
464 break;
465 pos = &p->list;
466 }
467
468 list_add(&ie->list, pos);
469}
470
Johan Hedberg31754052012-01-04 13:39:52 +0200471bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg561aafb2012-01-04 13:31:59 +0200472 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473{
Johan Hedberg30883512012-01-04 14:16:21 +0200474 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200475 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476
477 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
478
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200479 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200480 if (ie) {
481 if (ie->name_state == NAME_NEEDED &&
482 data->rssi != ie->data.rssi) {
483 ie->data.rssi = data->rssi;
484 hci_inquiry_cache_update_resolve(hdev, ie);
485 }
486
Johan Hedberg561aafb2012-01-04 13:31:59 +0200487 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200488 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200489
Johan Hedberg561aafb2012-01-04 13:31:59 +0200490 /* Entry not in the cache. Add new one. */
491 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
492 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200493 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200494
495 list_add(&ie->all, &cache->all);
496
497 if (name_known) {
498 ie->name_state = NAME_KNOWN;
499 } else {
500 ie->name_state = NAME_NOT_KNOWN;
501 list_add(&ie->list, &cache->unknown);
502 }
503
504update:
505 if (name_known && ie->name_state != NAME_KNOWN &&
506 ie->name_state != NAME_PENDING) {
507 ie->name_state = NAME_KNOWN;
508 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509 }
510
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200511 memcpy(&ie->data, data, sizeof(*data));
512 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200514
515 if (ie->name_state == NAME_NOT_KNOWN)
516 return false;
517
518 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519}
520
521static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
522{
Johan Hedberg30883512012-01-04 14:16:21 +0200523 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700524 struct inquiry_info *info = (struct inquiry_info *) buf;
525 struct inquiry_entry *e;
526 int copied = 0;
527
Johan Hedberg561aafb2012-01-04 13:31:59 +0200528 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200530
531 if (copied >= num)
532 break;
533
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 bacpy(&info->bdaddr, &data->bdaddr);
535 info->pscan_rep_mode = data->pscan_rep_mode;
536 info->pscan_period_mode = data->pscan_period_mode;
537 info->pscan_mode = data->pscan_mode;
538 memcpy(info->dev_class, data->dev_class, 3);
539 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200542 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543 }
544
545 BT_DBG("cache %p, copied %d", cache, copied);
546 return copied;
547}
548
549static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
550{
551 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
552 struct hci_cp_inquiry cp;
553
554 BT_DBG("%s", hdev->name);
555
556 if (test_bit(HCI_INQUIRY, &hdev->flags))
557 return;
558
559 /* Start Inquiry */
560 memcpy(&cp.lap, &ir->lap, 3);
561 cp.length = ir->length;
562 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200563 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564}
565
566int hci_inquiry(void __user *arg)
567{
568 __u8 __user *ptr = arg;
569 struct hci_inquiry_req ir;
570 struct hci_dev *hdev;
571 int err = 0, do_inquiry = 0, max_rsp;
572 long timeo;
573 __u8 *buf;
574
575 if (copy_from_user(&ir, ptr, sizeof(ir)))
576 return -EFAULT;
577
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200578 hdev = hci_dev_get(ir.dev_id);
579 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580 return -ENODEV;
581
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300582 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900583 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200584 inquiry_cache_empty(hdev) ||
585 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586 inquiry_cache_flush(hdev);
587 do_inquiry = 1;
588 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300589 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590
Marcel Holtmann04837f62006-07-03 10:02:33 +0200591 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200592
593 if (do_inquiry) {
594 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
595 if (err < 0)
596 goto done;
597 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700598
599 /* for unlimited number of responses we will use buffer with 255 entries */
600 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
601
602 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
603 * copy it to the user space.
604 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100605 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200606 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700607 err = -ENOMEM;
608 goto done;
609 }
610
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300611 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700612 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300613 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614
615 BT_DBG("num_rsp %d", ir.num_rsp);
616
617 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
618 ptr += sizeof(ir);
619 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
620 ir.num_rsp))
621 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900622 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 err = -EFAULT;
624
625 kfree(buf);
626
627done:
628 hci_dev_put(hdev);
629 return err;
630}
631
632/* ---- HCI ioctl helpers ---- */
633
634int hci_dev_open(__u16 dev)
635{
636 struct hci_dev *hdev;
637 int ret = 0;
638
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200639 hdev = hci_dev_get(dev);
640 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 return -ENODEV;
642
643 BT_DBG("%s %p", hdev->name, hdev);
644
645 hci_req_lock(hdev);
646
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200647 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
648 ret = -ERFKILL;
649 goto done;
650 }
651
Linus Torvalds1da177e2005-04-16 15:20:36 -0700652 if (test_bit(HCI_UP, &hdev->flags)) {
653 ret = -EALREADY;
654 goto done;
655 }
656
657 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
658 set_bit(HCI_RAW, &hdev->flags);
659
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200660 /* Treat all non BR/EDR controllers as raw devices if
661 enable_hs is not set */
662 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100663 set_bit(HCI_RAW, &hdev->flags);
664
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 if (hdev->open(hdev)) {
666 ret = -EIO;
667 goto done;
668 }
669
670 if (!test_bit(HCI_RAW, &hdev->flags)) {
671 atomic_set(&hdev->cmd_cnt, 1);
672 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200673 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674
Marcel Holtmann04837f62006-07-03 10:02:33 +0200675 ret = __hci_request(hdev, hci_init_req, 0,
676 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677
Andre Guedeseead27d2011-06-30 19:20:55 -0300678 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300679 ret = __hci_request(hdev, hci_le_init_req, 0,
680 msecs_to_jiffies(HCI_INIT_TIMEOUT));
681
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682 clear_bit(HCI_INIT, &hdev->flags);
683 }
684
685 if (!ret) {
686 hci_dev_hold(hdev);
687 set_bit(HCI_UP, &hdev->flags);
688 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200689 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300690 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200691 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300692 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200693 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900694 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200696 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200697 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400698 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699
700 skb_queue_purge(&hdev->cmd_q);
701 skb_queue_purge(&hdev->rx_q);
702
703 if (hdev->flush)
704 hdev->flush(hdev);
705
706 if (hdev->sent_cmd) {
707 kfree_skb(hdev->sent_cmd);
708 hdev->sent_cmd = NULL;
709 }
710
711 hdev->close(hdev);
712 hdev->flags = 0;
713 }
714
715done:
716 hci_req_unlock(hdev);
717 hci_dev_put(hdev);
718 return ret;
719}
720
721static int hci_dev_do_close(struct hci_dev *hdev)
722{
723 BT_DBG("%s %p", hdev->name, hdev);
724
Andre Guedes28b75a82012-02-03 17:48:00 -0300725 cancel_work_sync(&hdev->le_scan);
726
Linus Torvalds1da177e2005-04-16 15:20:36 -0700727 hci_req_cancel(hdev, ENODEV);
728 hci_req_lock(hdev);
729
730 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300731 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732 hci_req_unlock(hdev);
733 return 0;
734 }
735
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200736 /* Flush RX and TX works */
737 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400738 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200740 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200741 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200742 hdev->discov_timeout = 0;
743 }
744
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200745 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200746 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200747
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200748 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200749 cancel_delayed_work(&hdev->service_cache);
750
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300751 cancel_delayed_work_sync(&hdev->le_scan_disable);
752
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300753 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754 inquiry_cache_flush(hdev);
755 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300756 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
758 hci_notify(hdev, HCI_DEV_DOWN);
759
760 if (hdev->flush)
761 hdev->flush(hdev);
762
763 /* Reset device */
764 skb_queue_purge(&hdev->cmd_q);
765 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200766 if (!test_bit(HCI_RAW, &hdev->flags) &&
767 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200769 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200770 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700771 clear_bit(HCI_INIT, &hdev->flags);
772 }
773
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200774 /* flush cmd work */
775 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700776
777 /* Drop queues */
778 skb_queue_purge(&hdev->rx_q);
779 skb_queue_purge(&hdev->cmd_q);
780 skb_queue_purge(&hdev->raw_q);
781
782 /* Drop last sent command */
783 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300784 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 kfree_skb(hdev->sent_cmd);
786 hdev->sent_cmd = NULL;
787 }
788
789 /* After this point our queues are empty
790 * and no tasks are scheduled. */
791 hdev->close(hdev);
792
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300793 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200794 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300795 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200796
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 /* Clear flags */
798 hdev->flags = 0;
799
800 hci_req_unlock(hdev);
801
802 hci_dev_put(hdev);
803 return 0;
804}
805
806int hci_dev_close(__u16 dev)
807{
808 struct hci_dev *hdev;
809 int err;
810
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200811 hdev = hci_dev_get(dev);
812 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 return -ENODEV;
814 err = hci_dev_do_close(hdev);
815 hci_dev_put(hdev);
816 return err;
817}
818
/* Soft-reset the HCI device with index @dev: flush queued traffic and
 * cached state, then (unless the device is in raw mode) issue an HCI
 * Reset command. Returns 0 on success or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other requests for the whole reset. */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up; ret stays 0. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush cached inquiry results and connection state under the
	 * device lock. */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues. */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the flow-control counters to their idle values. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* In raw mode userspace drives the controller directly, so no
	 * HCI Reset is sent on its behalf. */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
857
858int hci_dev_reset_stat(__u16 dev)
859{
860 struct hci_dev *hdev;
861 int ret = 0;
862
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200863 hdev = hci_dev_get(dev);
864 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700865 return -ENODEV;
866
867 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
868
869 hci_dev_put(hdev);
870
871 return ret;
872}
873
/* Handle the HCISET* ioctls: read a struct hci_dev_req from userspace
 * and apply the requested setting to the target device.
 * Returns 0 on success or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only master/accept bits are honoured from userspace. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values: the low
	 * half is the packet count, the high half is the MTU. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
948
/* HCIGETDEVLIST ioctl helper: copy up to the requested number of
 * (dev_id, flags) pairs for registered devices back to userspace.
 * Returns 0 on success or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays a small, bounded
	 * allocation. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device list: keep auto-off
		 * devices alive. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
995
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device named in the request and copy it back to userspace.
 * Returns 0 on success or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is interested in the device: keep an auto-off
	 * device powered. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low nibble, device type above it. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1037
1038/* ---- Interface to HCI drivers ---- */
1039
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001040static int hci_rfkill_set_block(void *data, bool blocked)
1041{
1042 struct hci_dev *hdev = data;
1043
1044 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1045
1046 if (!blocked)
1047 return 0;
1048
1049 hci_dev_do_close(hdev);
1050
1051 return 0;
1052}
1053
/* rfkill operations: only block-state changes are handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1057
Linus Torvalds1da177e2005-04-16 15:20:36 -07001058/* Alloc HCI device */
1059struct hci_dev *hci_alloc_dev(void)
1060{
1061 struct hci_dev *hdev;
1062
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001063 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064 if (!hdev)
1065 return NULL;
1066
David Herrmann0ac7e702011-10-08 14:58:47 +02001067 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 skb_queue_head_init(&hdev->driver_init);
1069
1070 return hdev;
1071}
1072EXPORT_SYMBOL(hci_alloc_dev);
1073
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). The memory itself
 * is freed by the device-model release callback once the last
 * reference is dropped. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1083
/* Deferred power-on work: open the device, arm the auto-power-off
 * timer when the device was powered for setup only, and notify mgmt
 * once initial setup completes. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless userspace claims the
	 * device before the timeout. */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1100
/* Deferred power-off work: clear the auto-off marker and close the
 * device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1112
/* Deferred discoverable-timeout work: drop back to page-scan only
 * (no inquiry scan) and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1130
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001131int hci_uuids_clear(struct hci_dev *hdev)
1132{
1133 struct list_head *p, *n;
1134
1135 list_for_each_safe(p, n, &hdev->uuids) {
1136 struct bt_uuid *uuid;
1137
1138 uuid = list_entry(p, struct bt_uuid, list);
1139
1140 list_del(p);
1141 kfree(uuid);
1142 }
1143
1144 return 0;
1145}
1146
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001147int hci_link_keys_clear(struct hci_dev *hdev)
1148{
1149 struct list_head *p, *n;
1150
1151 list_for_each_safe(p, n, &hdev->link_keys) {
1152 struct link_key *key;
1153
1154 key = list_entry(p, struct link_key, list);
1155
1156 list_del(p);
1157 kfree(key);
1158 }
1159
1160 return 0;
1161}
1162
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001163int hci_smp_ltks_clear(struct hci_dev *hdev)
1164{
1165 struct smp_ltk *k, *tmp;
1166
1167 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1168 list_del(&k->list);
1169 kfree(k);
1170 }
1171
1172 return 0;
1173}
1174
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001175struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1176{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001177 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001178
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001179 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001180 if (bacmp(bdaddr, &k->bdaddr) == 0)
1181 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001182
1183 return NULL;
1184}
1185
/* Decide whether a newly created link key should be stored
 * persistently (returns 1) or kept only for the current session
 * (returns 0), based on the key type and the bonding requirements of
 * both sides of @conn. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1221
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001222struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001225
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001226 list_for_each_entry(k, &hdev->long_term_keys, list) {
1227 if (k->ediv != ediv ||
1228 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001229 continue;
1230
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232 }
1233
1234 return NULL;
1235}
1236EXPORT_SYMBOL(hci_find_ltk);
1237
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001238struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1239 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001241 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001242
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001243 list_for_each_entry(k, &hdev->long_term_keys, list)
1244 if (addr_type == k->bdaddr_type &&
1245 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001246 return k;
1247
1248 return NULL;
1249}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001250EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251
/* Store (or update) the BR/EDR link key for @bdaddr. When @new_key is
 * set, mgmt is notified and non-persistent keys are dropped again
 * after the notification. Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known". */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key type. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Session-only keys were passed to mgmt but are not kept. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1306
/* Store (or update) an SMP key for @bdaddr/@addr_type. Only STK and
 * LTK types are accepted; anything else is silently ignored. When
 * @new_key is set and the key is an LTK, mgmt is notified.
 * Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Replace any existing key for the same address/type in place. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long-term keys (not short-term) are reported to mgmt. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1343
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001344int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1345{
1346 struct link_key *key;
1347
1348 key = hci_find_link_key(hdev, bdaddr);
1349 if (!key)
1350 return -ENOENT;
1351
1352 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1353
1354 list_del(&key->list);
1355 kfree(key);
1356
1357 return 0;
1358}
1359
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001360int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1361{
1362 struct smp_ltk *k, *tmp;
1363
1364 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1365 if (bacmp(bdaddr, &k->bdaddr))
1366 continue;
1367
1368 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1369
1370 list_del(&k->list);
1371 kfree(k);
1372 }
1373
1374 return 0;
1375}
1376
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time:
 * restore the command credit so the queue is not stuck forever and
 * kick the command work to send the next one. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1386
Szymon Janc2763eda2011-03-22 13:12:22 +01001387struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1388 bdaddr_t *bdaddr)
1389{
1390 struct oob_data *data;
1391
1392 list_for_each_entry(data, &hdev->remote_oob_data, list)
1393 if (bacmp(bdaddr, &data->bdaddr) == 0)
1394 return data;
1395
1396 return NULL;
1397}
1398
1399int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1400{
1401 struct oob_data *data;
1402
1403 data = hci_find_remote_oob_data(hdev, bdaddr);
1404 if (!data)
1405 return -ENOENT;
1406
1407 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1408
1409 list_del(&data->list);
1410 kfree(data);
1411
1412 return 0;
1413}
1414
1415int hci_remote_oob_data_clear(struct hci_dev *hdev)
1416{
1417 struct oob_data *data, *n;
1418
1419 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1420 list_del(&data->list);
1421 kfree(data);
1422 }
1423
1424 return 0;
1425}
1426
1427int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1428 u8 *randomizer)
1429{
1430 struct oob_data *data;
1431
1432 data = hci_find_remote_oob_data(hdev, bdaddr);
1433
1434 if (!data) {
1435 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1436 if (!data)
1437 return -ENOMEM;
1438
1439 bacpy(&data->bdaddr, bdaddr);
1440 list_add(&data->list, &hdev->remote_oob_data);
1441 }
1442
1443 memcpy(data->hash, hash, sizeof(data->hash));
1444 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1445
1446 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1447
1448 return 0;
1449}
1450
Antti Julkub2a66aa2011-06-15 12:01:14 +03001451struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1452 bdaddr_t *bdaddr)
1453{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001454 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001456 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457 if (bacmp(bdaddr, &b->bdaddr) == 0)
1458 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001459
1460 return NULL;
1461}
1462
1463int hci_blacklist_clear(struct hci_dev *hdev)
1464{
1465 struct list_head *p, *n;
1466
1467 list_for_each_safe(p, n, &hdev->blacklist) {
1468 struct bdaddr_list *b;
1469
1470 b = list_entry(p, struct bdaddr_list, list);
1471
1472 list_del(p);
1473 kfree(b);
1474 }
1475
1476 return 0;
1477}
1478
/* Add @bdaddr (of @type) to the device blacklist and notify mgmt.
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already listed, -ENOMEM
 * on allocation failure, otherwise the mgmt notification result. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted. */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1499
/* Remove @bdaddr (of @type) from the device blacklist and notify
 * mgmt. Passing BDADDR_ANY clears the whole list instead.
 * Returns -ENOENT if the address is not listed, otherwise the mgmt
 * notification result (or the clear result for BDADDR_ANY). */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1516
/* Deferred work: drop all cached LE advertising entries under the
 * device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1528
Andre Guedes76c86862011-05-26 16:23:50 -03001529int hci_adv_entries_clear(struct hci_dev *hdev)
1530{
1531 struct adv_entry *entry, *tmp;
1532
1533 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1534 list_del(&entry->list);
1535 kfree(entry);
1536 }
1537
1538 BT_DBG("%s adv cache cleared", hdev->name);
1539
1540 return 0;
1541}
1542
1543struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1544{
1545 struct adv_entry *entry;
1546
1547 list_for_each_entry(entry, &hdev->adv_entries, list)
1548 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1549 return entry;
1550
1551 return NULL;
1552}
1553
1554static inline int is_connectable_adv(u8 evt_type)
1555{
1556 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1557 return 1;
1558
1559 return 0;
1560}
1561
/* Cache the advertiser address from an LE advertising report.
 * Non-connectable reports are rejected (-EINVAL); addresses already
 * cached are ignored (0). Returns 0 on success or -ENOMEM. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1589
/* __hci_request callback: send LE Set Scan Parameters built from the
 * struct le_scan_params passed via @opt. The command struct is
 * memset first so unused/padding bytes on the wire are zero. */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1602
/* __hci_request callback: send LE Set Scan Enable with enable = 1
 * (remaining fields zeroed by the memset). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1612
/* Start an LE scan synchronously: set the scan parameters, enable
 * scanning, and schedule automatic disable after @timeout ms.
 * Returns 0 on success, -EINPROGRESS if a scan is already running,
 * or the __hci_request() error. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	/* Per-command completion timeout (3 s), distinct from the
	 * overall scan duration in @timeout. */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* &param stays valid for the request because __hci_request()
	 * completes before this stack frame is left. */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the delayed work that stops the scan again. */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1646
/* Delayed work scheduled by hci_do_le_scan(): stop the LE scan by
 * sending Set Scan Enable with an all-zero payload (enable = 0). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1659
/* Work item queued by hci_le_scan(): run the actual scan with the
 * parameters stashed in hdev->le_scan_params. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
					param->window, param->timeout);
}
1670
/* Kick off an asynchronous LE scan: record the parameters on the
 * device and queue le_scan_work on the long-running workqueue.
 * Returns 0, or -EINPROGRESS if scan work is already pending or
 * running. */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
						int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan request may block for seconds. */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1690
/* Register HCI device */
/* Allocate the first free device id, initialize all per-device state
 * (locks, work items, queues, lists), create the per-device workqueue
 * and sysfs/rfkill entries, and schedule the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative errno
 * (-EINVAL for a driver without open/close, -ENOMEM or the sysfs error
 * on setup failure).  On failure the device is unlinked again from
 * hci_dev_list before returning.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open and close callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert so the list stays sorted by id. */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Command timeout watchdog, re-armed per queued command. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for rx/tx/cmd work. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is non-fatal. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above under the same lock. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1822
/* Unregister HCI device */
/* Tear down a registered device: unlink it, close it, free reassembly
 * buffers, notify mgmt/listeners, remove rfkill and sysfs entries,
 * destroy the workqueue and clear all persistent per-device lists.
 * Drops the reference taken by hci_register_dev() at the end, which
 * may free hdev.  The ordering below mirrors registration in reverse
 * and must not be rearranged.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Make the device invisible to new users first. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device ever finished
	 * setup; otherwise userspace never saw it. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush all persistent state under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1875
/* Suspend HCI device */
/* Notify registered listeners of suspend; no HCI traffic is generated
 * here.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1883
/* Resume HCI device */
/* Notify registered listeners of resume; counterpart of
 * hci_suspend_dev().  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1891
/* Receive frame from HCI drivers */
/* Entry point for a complete HCI packet from a driver.  The skb's dev
 * field must point at the hci_dev.  Drops the frame with -ENXIO when
 * the device is neither up nor initializing; otherwise marks it as
 * incoming, timestamps it and queues it for the RX work item.
 * Consumes the skb in all cases. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer processing to hci_rx_work in process context. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1914
/* Incrementally reassemble one HCI packet of @type from driver bytes
 * into hdev->reassembly[index].
 *
 * @data/@count: next chunk of raw bytes from the driver.
 * @index: reassembly slot (one per packet type for fragment mode, or
 *	   the single stream slot for hci_recv_stream_fragment).
 *
 * Returns the number of bytes of @data left unconsumed (>= 0), or a
 * negative error: -EILSEQ for a bad type/index, -ENOMEM when the skb
 * allocation fails or the header announces more payload than fits.
 * When a frame completes it is handed to hci_recv_frame() and the
 * slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the largest possible
		 * frame for this type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed
		 * before the current stage (header/payload) is done. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the true payload
		 * length and keep expecting that many more bytes. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2023
Marcel Holtmannef222012007-07-11 06:42:04 +02002024int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2025{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302026 int rem = 0;
2027
Marcel Holtmannef222012007-07-11 06:42:04 +02002028 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2029 return -EILSEQ;
2030
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002031 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002032 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302033 if (rem < 0)
2034 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002035
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302036 data += (count - rem);
2037 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002038 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002039
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302040 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002041}
2042EXPORT_SYMBOL(hci_recv_fragment);
2043
#define STREAM_REASSEMBLY 0

/* Feed a raw HCI byte stream (e.g. from a UART transport) into the
 * single shared stream reassembly slot.  At a frame boundary the first
 * byte is the packet type indicator; the remainder is reassembled by
 * hci_reassembly().  Returns bytes left unconsumed (>= 0) or a
 * negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Mid-frame: continue with the stored type. */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079/* ---- Interface to upper protocols ---- */
2080
/* Register an upper-protocol callback block (connection state hooks)
 * on the global hci_cb_list.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2092
/* Remove a previously registered callback block from hci_cb_list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2104
/* Hand one outgoing packet to the driver.  The skb's dev field must
 * point at the hci_dev.  Timestamps the skb, mirrors it to the monitor
 * channel and (in promiscuous mode) to raw sockets, then calls the
 * driver's send callback.  Consumes the skb; returns the driver's
 * result or -ENODEV when no device is attached. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2132
/* Send HCI command */
/* Build an HCI command packet and queue it on cmd_q for hci_cmd_work.
 *
 * @opcode: command opcode in host byte order (converted to LE here).
 * @plen: length of the parameter block; @param: parameter bytes
 *	  (ignored when @plen is 0).
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init so the
	 * init sequence can follow its progress. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002170void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171{
2172 struct hci_command_hdr *hdr;
2173
2174 if (!hdev->sent_cmd)
2175 return NULL;
2176
2177 hdr = (void *) hdev->sent_cmd->data;
2178
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002179 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 return NULL;
2181
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002182 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2185}
2186
2187/* Send ACL data */
2188static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2189{
2190 struct hci_acl_hdr *hdr;
2191 int len = skb->len;
2192
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002193 skb_push(skb, HCI_ACL_HDR_SIZE);
2194 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002195 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002196 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2197 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198}
2199
/* Queue an ACL skb on @queue.  The first fragment's ACL header was
 * already added by the caller; any continuation fragments hanging off
 * frag_list get headers here with ACL_START replaced by ACL_CONT.
 * All fragments of one PDU are enqueued atomically under the queue
 * lock so they are never interleaved with another PDU's fragments. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2240
/* Send ACL data on @chan: stamp the first fragment with its ACL
 * header, queue the whole (possibly fragmented) skb on the channel's
 * data queue and kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2257
/* Send SCO data */
/* Prepend a SCO header (connection handle, payload length) to @skb,
 * queue it on the connection's data queue and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2280
2281/* ---- HCI TX task (outgoing data) ---- */
2282
/* HCI Connection scheduler */
/* Pick the connection of link @type with the fewest in-flight packets
 * (simple fairness) among connected/configuring links that have queued
 * data.  *quote is set to an equal share of the free controller
 * buffers for that link type (at least 1), or 0 when no connection is
 * ready.  The connection list is traversed under RCU. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-busy connection wins. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer credits for this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when it has none of its own. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2342
/* TX has stalled on link @type (no completed-packet credits returned
 * in time): forcibly disconnect every connection of that type that
 * still has unacked packets, using reason 0x13 (per the Bluetooth
 * core spec, "Remote User Terminated Connection"). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2363
/* Pick the best channel to service for link @type.
 *
 * Scans every connected/configuring connection of @type and, within
 * each, every channel with queued data.  Only channels whose head skb
 * carries the highest priority seen so far compete; among those, the
 * channel on the least-busy connection (lowest conn->sent) wins.
 * *quote receives an equal share of the free controller buffers for
 * the winning link type (at least 1).  Returns NULL when no channel
 * has data ready.  Traversal is under RCU.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Split the free buffer credits evenly between the channels
	 * competing at the winning priority. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2442
/* Anti-starvation pass for link @type, run after a scheduling round.
 * Channels that were serviced this round get their sent counter reset;
 * channels that were NOT serviced but still have queued data get their
 * head skb promoted to HCI_PRIO_MAX - 1 so they win the next round.
 * Traversal is under RCU. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Serviced this round: clear and move on. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2492
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002493static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2494{
2495 /* Calculate count of blocks used by this packet */
2496 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2497}
2498
/* Detect a stalled ACL TX path: when no buffer credits are left
 * (@cnt == 0) and nothing has been sent for longer than
 * HCI_ACL_TX_TIMEOUT, kill the stalled links via hci_link_tx_to().
 * Skipped for raw-mode (HCI_RAW) devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
/* Packet-based ACL scheduler: while buffer credits remain, drain the
 * highest-priority channel picked by hci_chan_sent(), up to its quota,
 * stopping a channel early if a lower-priority skb reaches its head.
 * Ends with an anti-starvation pass when anything was sent. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before transmitting if needed. */
			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: age priorities of the channels skipped. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2547
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002548static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2549{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002550 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002551 struct hci_chan *chan;
2552 struct sk_buff *skb;
2553 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002554
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002555 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002556
2557 while (hdev->block_cnt > 0 &&
2558 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2559 u32 priority = (skb_peek(&chan->data_q))->priority;
2560 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2561 int blocks;
2562
2563 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2564 skb->len, skb->priority);
2565
2566 /* Stop if priority has changed */
2567 if (skb->priority < priority)
2568 break;
2569
2570 skb = skb_dequeue(&chan->data_q);
2571
2572 blocks = __get_blocks(hdev, skb);
2573 if (blocks > hdev->block_cnt)
2574 return;
2575
2576 hci_conn_enter_active_mode(chan->conn,
2577 bt_cb(skb)->force_active);
2578
2579 hci_send_frame(skb);
2580 hdev->acl_last_tx = jiffies;
2581
2582 hdev->block_cnt -= blocks;
2583 quote -= blocks;
2584
2585 chan->sent += blocks;
2586 chan->conn->sent += blocks;
2587 }
2588 }
2589
2590 if (cnt != hdev->block_cnt)
2591 hci_prio_recalculate(hdev, ACL_LINK);
2592}
2593
2594static inline void hci_sched_acl(struct hci_dev *hdev)
2595{
2596 BT_DBG("%s", hdev->name);
2597
2598 if (!hci_conn_num(hdev, ACL_LINK))
2599 return;
2600
2601 switch (hdev->flow_ctl_mode) {
2602 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2603 hci_sched_acl_pkt(hdev);
2604 break;
2605
2606 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2607 hci_sched_acl_blk(hdev);
2608 break;
2609 }
2610}
2611
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612/* Schedule SCO */
2613static inline void hci_sched_sco(struct hci_dev *hdev)
2614{
2615 struct hci_conn *conn;
2616 struct sk_buff *skb;
2617 int quote;
2618
2619 BT_DBG("%s", hdev->name);
2620
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002621 if (!hci_conn_num(hdev, SCO_LINK))
2622 return;
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2625 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2626 BT_DBG("skb %p len %d", skb, skb->len);
2627 hci_send_frame(skb);
2628
2629 conn->sent++;
2630 if (conn->sent == ~0)
2631 conn->sent = 0;
2632 }
2633 }
2634}
2635
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002636static inline void hci_sched_esco(struct hci_dev *hdev)
2637{
2638 struct hci_conn *conn;
2639 struct sk_buff *skb;
2640 int quote;
2641
2642 BT_DBG("%s", hdev->name);
2643
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002644 if (!hci_conn_num(hdev, ESCO_LINK))
2645 return;
2646
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002647 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2648 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2649 BT_DBG("skb %p len %d", skb, skb->len);
2650 hci_send_frame(skb);
2651
2652 conn->sent++;
2653 if (conn->sent == ~0)
2654 conn->sent = 0;
2655 }
2656 }
2657}
2658
/* LE scheduler: like the packet-based ACL scheduler, but LE may share
 * the ACL buffer pool when the controller reports no dedicated LE
 * buffers (hdev->le_pkts == 0).
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): hard-coded HZ * 45 here, while the ACL path
		 * uses msecs_to_jiffies(HCI_ACL_TX_TIMEOUT) — presumably
		 * equivalent in intent; confirm before unifying. */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE buffer count if present, else borrow ACL's. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the leftover budget back to whichever pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2709
/* TX work item: run every per-link-type scheduler, then flush any queued
 * raw (unknown type) packets straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2732
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002733/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734
2735/* ACL data packet */
2736static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2737{
2738 struct hci_acl_hdr *hdr = (void *) skb->data;
2739 struct hci_conn *conn;
2740 __u16 handle, flags;
2741
2742 skb_pull(skb, HCI_ACL_HDR_SIZE);
2743
2744 handle = __le16_to_cpu(hdr->handle);
2745 flags = hci_flags(handle);
2746 handle = hci_handle(handle);
2747
2748 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2749
2750 hdev->stat.acl_rx++;
2751
2752 hci_dev_lock(hdev);
2753 conn = hci_conn_hash_lookup_handle(hdev, handle);
2754 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002755
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002757 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002758
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002760 l2cap_recv_acldata(conn, skb, flags);
2761 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002763 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 hdev->name, handle);
2765 }
2766
2767 kfree_skb(skb);
2768}
2769
2770/* SCO data packet */
2771static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2772{
2773 struct hci_sco_hdr *hdr = (void *) skb->data;
2774 struct hci_conn *conn;
2775 __u16 handle;
2776
2777 skb_pull(skb, HCI_SCO_HDR_SIZE);
2778
2779 handle = __le16_to_cpu(hdr->handle);
2780
2781 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2782
2783 hdev->stat.sco_rx++;
2784
2785 hci_dev_lock(hdev);
2786 conn = hci_conn_hash_lookup_handle(hdev, handle);
2787 hci_dev_unlock(hdev);
2788
2789 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002791 sco_recv_scodata(conn, skb);
2792 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002794 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 hdev->name, handle);
2796 }
2797
2798 kfree_skb(skb);
2799}
2800
/* RX work item: drain hdev->rx_q, mirroring each packet to the monitor
 * channel and (in promiscuous mode) to raw sockets, then dispatching by
 * packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process frames itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to finish init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop. */
			kfree_skb(skb);
			break;
		}
	}
}
2855
/* Command work item: transmit the next queued HCI command when the
 * controller has command credit (cmd_cnt), keeping a clone in
 * hdev->sent_cmd for matching against the completion event.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously sent command before replacing it. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending;
			 * otherwise (re)arm the command watchdog. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002886
2887int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2888{
2889 /* General inquiry access code (GIAC) */
2890 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2891 struct hci_cp_inquiry cp;
2892
2893 BT_DBG("%s", hdev->name);
2894
2895 if (test_bit(HCI_INQUIRY, &hdev->flags))
2896 return -EINPROGRESS;
2897
Johan Hedberg46632622012-01-02 16:06:08 +02002898 inquiry_cache_flush(hdev);
2899
Andre Guedes2519a1f2011-11-07 11:45:24 -03002900 memset(&cp, 0, sizeof(cp));
2901 memcpy(&cp.lap, lap, sizeof(cp.lap));
2902 cp.length = length;
2903
2904 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2905}
Andre Guedes023d50492011-11-04 14:16:52 -03002906
2907int hci_cancel_inquiry(struct hci_dev *hdev)
2908{
2909 BT_DBG("%s", hdev->name);
2910
2911 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2912 return -EPERM;
2913
2914 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2915}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002916
/* Runtime-toggleable module parameter enabling High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");