blob: 1705d9372725f5d9b7af2aeb40b65124b0ea3b7e [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Fabio Estevam8b281b92012-01-10 18:33:50 -020058bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020059
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register @nb on the global HCI notifier chain so it receives
 * hci_notify() events (HCI_DEV_UP/DOWN etc.). Returns the notifier
 * chain register result (0 on success).
 */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the global HCI notifier chain. Returns the notifier
 * chain unregister result (0 on success).
 */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (e.g. HCI_DEV_UP) to all registered HCI notifiers,
 * passing the device as the notifier payload.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which issues one or more HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() resolves
 * it. Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno mapped from the HCI status on
 * completion, the cancel error, -ETIMEDOUT on timeout, or -EINTR if a
 * signal arrived while sleeping.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves and go interruptible BEFORE kicking off the
	 * request, so a completion arriving immediately is not lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status byte into a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
/* Request callback: mark a reset in progress and issue HCI_Reset.
 * Used via __hci_request(); @opt is unused.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Issue the BR/EDR controller init command sequence. Runs inside the
 * HCI_INIT phase from hci_init_req(); responses are handled by the
 * event path. Command order follows the mandatory/optional split below.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers do packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped when the controller must not be reset) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Wipe all stored link keys on the controller. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Issue the (much shorter) init sequence for AMP controllers, which
 * use block-based flow control instead of packet-based.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device bring-up: flush any driver-supplied
 * "special" init commands to the command queue first, then run the
 * controller-type specific init sequence. @opt is unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: drain driver_init into the command queue and
	 * kick the command worker for each one. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE-specific init: query the LE buffer size.
 * Only run when the host is LE capable (see hci_dev_open()).
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: set inquiry/page scan enable to the mode packed
 * into @opt (Write_Scan_Enable bitmask).
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: enable/disable authentication according to the
 * value packed into @opt.
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: enable/disable link-level encryption according to
 * the value packed into @opt.
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: set the default link policy from the 16-bit value
 * packed into @opt (converted to little endian on the wire).
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
Andre Guedesc5990082012-02-03 17:47:57 -0300364 discov->state == DISCOVERY_LE_SCAN ||
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200365 discov->state == DISCOVERY_RESOLVING)
366 return true;
367
368 return false;
369}
370
/* Transition the discovery state machine to @state, emitting mgmt
 * "discovering" events on the edges userspace cares about. A no-op if
 * the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Tell mgmt listeners discovery has ended. */
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		/* Discovery is now actively running. */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
396
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397static void inquiry_cache_flush(struct hci_dev *hdev)
398{
Johan Hedberg30883512012-01-04 14:16:21 +0200399 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200400 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401
Johan Hedberg561aafb2012-01-04 13:31:59 +0200402 list_for_each_entry_safe(p, n, &cache->all, all) {
403 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200404 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200406
407 INIT_LIST_HEAD(&cache->unknown);
408 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200409 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410}
411
412struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
413{
Johan Hedberg30883512012-01-04 14:16:21 +0200414 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 struct inquiry_entry *e;
416
417 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
418
Johan Hedberg561aafb2012-01-04 13:31:59 +0200419 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200421 return e;
422 }
423
424 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425}
426
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
428 bdaddr_t *bdaddr)
429{
Johan Hedberg30883512012-01-04 14:16:21 +0200430 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200431 struct inquiry_entry *e;
432
433 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
434
435 list_for_each_entry(e, &cache->unknown, list) {
436 if (!bacmp(&e->data.bdaddr, bdaddr))
437 return e;
438 }
439
440 return NULL;
441}
442
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200443struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
444 bdaddr_t *bdaddr,
445 int state)
446{
447 struct discovery_state *cache = &hdev->discovery;
448 struct inquiry_entry *e;
449
450 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
451
452 list_for_each_entry(e, &cache->resolve, list) {
453 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
454 return e;
455 if (!bacmp(&e->data.bdaddr, bdaddr))
456 return e;
457 }
458
459 return NULL;
460}
461
/* Re-insert @ie into the resolve list so the list stays ordered by
 * RSSI (strongest signal first), leaving NAME_PENDING entries at their
 * current position ahead of it.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position whose entry is either pending or has a
	 * stronger (smaller |RSSI|) signal than @ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
480
/* Insert or refresh the cache entry for @data->bdaddr.
 *
 * @name_known: whether the caller already knows the remote name.
 *
 * Returns true if the entry's name is (now) known or being resolved,
 * false if the name is still unknown or allocation failed. The return
 * value tells the caller whether a name-request round is still needed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
							bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A changed RSSI may move the entry within the
		 * RSSI-sorted resolve list. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote to NAME_KNOWN and take the entry off the unknown/
	 * resolve list it was linked on (not safe for NAME_PENDING). */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
530
531static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
532{
Johan Hedberg30883512012-01-04 14:16:21 +0200533 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 struct inquiry_info *info = (struct inquiry_info *) buf;
535 struct inquiry_entry *e;
536 int copied = 0;
537
Johan Hedberg561aafb2012-01-04 13:31:59 +0200538 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200540
541 if (copied >= num)
542 break;
543
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 bacpy(&info->bdaddr, &data->bdaddr);
545 info->pscan_rep_mode = data->pscan_rep_mode;
546 info->pscan_period_mode = data->pscan_period_mode;
547 info->pscan_mode = data->pscan_mode;
548 memcpy(info->dev_class, data->dev_class, 3);
549 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200550
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 }
554
555 BT_DBG("cache %p, copied %d", cache, copied);
556 return copied;
557}
558
/* Request callback: start an inquiry using the parameters in the
 * struct hci_inquiry_req that @opt points to. No-op if an inquiry is
 * already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
575
/* HCIINQUIRY ioctl handler: run an inquiry (when the cache is stale,
 * empty, or a flush was requested) and copy the cached results back to
 * userspace after the request struct itself.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the device lock whether a fresh inquiry is
	 * needed; a flush invalidates the old cache either way. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 2000ms worth of jiffies here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
641
642/* ---- HCI ioctl helpers ---- */
643
/* Bring up HCI device @dev: open the transport, run the init request
 * sequence (unless the device is raw), and announce HCI_DEV_UP. On
 * init failure all work is flushed and the transport is closed again.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init request error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on a radio that rfkill has blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE init only when the host supports LE. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Hold a reference for as long as the device is up;
		 * dropped again in hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
730
/* Tear down a device: cancel pending requests and delayed work, flush
 * workers and queues, flush connections and the inquiry cache, notify
 * HCI_DEV_DOWN, optionally reset the controller, and close the
 * transport. The ordering below matters; returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device (only for controllers that skip reset at init,
	 * so they are left in a clean state) */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up. */
	hci_dev_put(hdev);
	return 0;
}
811
812int hci_dev_close(__u16 dev)
813{
814 struct hci_dev *hdev;
815 int err;
816
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200817 hdev = hci_dev_get(dev);
818 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 return -ENODEV;
820 err = hci_dev_do_close(hdev);
821 hci_dev_put(hdev);
822 return err;
823}
824
/* Soft-reset a running HCI device: drop all pending traffic and cached
 * state and (unless the device is in raw mode) issue an HCI_Reset to
 * the controller, without taking the interface down.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset unless the interface is up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush inquiry cache and connection state under the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush its own queues, if it provides a hook */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore one command credit and clear ACL/SCO/LE packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Raw-mode devices are driven by userspace; skip the HCI_Reset */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
863
864int hci_dev_reset_stat(__u16 dev)
865{
866 struct hci_dev *hdev;
867 int ret = 0;
868
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200869 hdev = hci_dev_get(dev);
870 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 return -ENODEV;
872
873 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
874
875 hci_dev_put(hdev);
876
877 return ret;
878}
879
/* Handle the legacy HCISET* ioctls: copy a struct hci_dev_req from
 * userspace and apply the requested setting to the target device.
 * Returns 0 on success or a negative errno (-EFAULT on bad user
 * pointer, -ENODEV for an unknown device, -EINVAL for an unknown
 * command).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only master/accept bits may be set via this ioctl */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values (in host
	 * memory order): index [1] is the MTU, index [0] the packet
	 * count. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
954
/* HCIGETDEVLIST ioctl helper: fill a struct hci_dev_list_req with the
 * id and flags of up to dev_num registered devices and copy it back to
 * userspace.  Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	/* The user buffer starts with the requested entry count */
	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* NOTE(review): listing devices has side effects - it
		 * cancels a pending auto power-off and may set
		 * HCI_PAIRABLE; presumably this marks takeover by the
		 * legacy ioctl interface - confirm against mgmt code. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1001
/* HCIGETDEVINFO ioctl helper: copy a snapshot of one device's state
 * (address, type, flags, MTUs, stats, features) back to userspace.
 * Returns 0 on success, -EFAULT on copy errors, -ENODEV when the
 * device does not exist.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* NOTE(review): querying info cancels a pending auto power-off
	 * and may set HCI_PAIRABLE - same side effects as
	 * hci_get_dev_list(); confirm intent against mgmt code. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type into the low nibble, device type into the high */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1043
1044/* ---- Interface to HCI drivers ---- */
1045
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001046static int hci_rfkill_set_block(void *data, bool blocked)
1047{
1048 struct hci_dev *hdev = data;
1049
1050 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1051
1052 if (!blocked)
1053 return 0;
1054
1055 hci_dev_do_close(hdev);
1056
1057 return 0;
1058}
1059
/* rfkill hooks for the controller's kill switch; only the block
 * operation is implemented (see hci_rfkill_set_block) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1063
Linus Torvalds1da177e2005-04-16 15:20:36 -07001064/* Alloc HCI device */
1065struct hci_dev *hci_alloc_dev(void)
1066{
1067 struct hci_dev *hdev;
1068
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001069 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 if (!hdev)
1071 return NULL;
1072
David Herrmann0ac7e702011-10-08 14:58:47 +02001073 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 skb_queue_head_init(&hdev->driver_init);
1075
1076 return hdev;
1077}
1078EXPORT_SYMBOL(hci_alloc_dev);
1079
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Discard any driver init packets still queued */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1089
/* Work item that powers on a controller: opens the device, arms the
 * delayed auto power-off when HCI_AUTO_OFF is set, and announces the
 * new index to mgmt once initial setup is done. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the device cannot be opened there is nothing more to do */
	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Schedule automatic power-down unless something claims the
	 * device before the timeout fires */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1106
1107static void hci_power_off(struct work_struct *work)
1108{
Johan Hedberg32435532011-11-07 22:16:04 +02001109 struct hci_dev *hdev = container_of(work, struct hci_dev,
1110 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001111
1112 BT_DBG("%s", hdev->name);
1113
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001114 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Johan Hedberg32435532011-11-07 22:16:04 +02001115
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001116 hci_dev_close(hdev->id);
1117}
1118
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001119static void hci_discov_off(struct work_struct *work)
1120{
1121 struct hci_dev *hdev;
1122 u8 scan = SCAN_PAGE;
1123
1124 hdev = container_of(work, struct hci_dev, discov_off.work);
1125
1126 BT_DBG("%s", hdev->name);
1127
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001128 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001129
1130 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1131
1132 hdev->discov_timeout = 0;
1133
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001134 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001135}
1136
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001137int hci_uuids_clear(struct hci_dev *hdev)
1138{
1139 struct list_head *p, *n;
1140
1141 list_for_each_safe(p, n, &hdev->uuids) {
1142 struct bt_uuid *uuid;
1143
1144 uuid = list_entry(p, struct bt_uuid, list);
1145
1146 list_del(p);
1147 kfree(uuid);
1148 }
1149
1150 return 0;
1151}
1152
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001153int hci_link_keys_clear(struct hci_dev *hdev)
1154{
1155 struct list_head *p, *n;
1156
1157 list_for_each_safe(p, n, &hdev->link_keys) {
1158 struct link_key *key;
1159
1160 key = list_entry(p, struct link_key, list);
1161
1162 list_del(p);
1163 kfree(key);
1164 }
1165
1166 return 0;
1167}
1168
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001169int hci_smp_ltks_clear(struct hci_dev *hdev)
1170{
1171 struct smp_ltk *k, *tmp;
1172
1173 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1174 list_del(&k->list);
1175 kfree(k);
1176 }
1177
1178 return 0;
1179}
1180
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001181struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1182{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001183 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001184
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001185 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001186 if (bacmp(bdaddr, &k->bdaddr) == 0)
1187 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001188
1189 return NULL;
1190}
1191
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001192static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1193 u8 key_type, u8 old_key_type)
1194{
1195 /* Legacy key */
1196 if (key_type < 0x03)
1197 return 1;
1198
1199 /* Debug keys are insecure so don't store them persistently */
1200 if (key_type == HCI_LK_DEBUG_COMBINATION)
1201 return 0;
1202
1203 /* Changed combination key and there's no previous one */
1204 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1205 return 0;
1206
1207 /* Security mode 3 case */
1208 if (!conn)
1209 return 1;
1210
1211 /* Neither local nor remote side had no-bonding as requirement */
1212 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1213 return 1;
1214
1215 /* Local side had dedicated bonding as requirement */
1216 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1217 return 1;
1218
1219 /* Remote side had dedicated bonding as requirement */
1220 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1221 return 1;
1222
1223 /* If none of the above criteria match, then don't store the key
1224 * persistently */
1225 return 0;
1226}
1227
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001228struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001229{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001230 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001231
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001232 list_for_each_entry(k, &hdev->long_term_keys, list) {
1233 if (k->ediv != ediv ||
1234 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001235 continue;
1236
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238 }
1239
1240 return NULL;
1241}
1242EXPORT_SYMBOL(hci_find_ltk);
1243
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001244struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1245 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001246{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001247 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001248
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001249 list_for_each_entry(k, &hdev->long_term_keys, list)
1250 if (addr_type == k->bdaddr_type &&
1251 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001252 return k;
1253
1254 return NULL;
1255}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001256EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257
/* Store (or update) the link key for @bdaddr.
 *
 * @conn may be NULL (security mode 3 style pairing).  @new_key is
 * non-zero when the key was just created, which triggers the
 * persistence decision and the mgmt notification; keys judged
 * non-persistent are removed again after notifying.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address, else allocate one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Session-only keys are dropped right after the notification */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1312
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK key types are accepted; other types are silently
 * ignored (return 0).  When @new_key is set and the key is an LTK,
 * mgmt is notified of the new key.
 * Returns 0 on success, -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the entry for this address/type, else allocate one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1349
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001350int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1351{
1352 struct link_key *key;
1353
1354 key = hci_find_link_key(hdev, bdaddr);
1355 if (!key)
1356 return -ENOENT;
1357
1358 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1359
1360 list_del(&key->list);
1361 kfree(key);
1362
1363 return 0;
1364}
1365
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001366int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1367{
1368 struct smp_ltk *k, *tmp;
1369
1370 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1371 if (bacmp(bdaddr, &k->bdaddr))
1372 continue;
1373
1374 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1375
1376 list_del(&k->list);
1377 kfree(k);
1378 }
1379
1380 return 0;
1381}
1382
Ville Tervo6bd32322011-02-16 16:32:41 +02001383/* HCI command timer function */
1384static void hci_cmd_timer(unsigned long arg)
1385{
1386 struct hci_dev *hdev = (void *) arg;
1387
1388 BT_ERR("%s command tx timeout", hdev->name);
1389 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001390 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001391}
1392
Szymon Janc2763eda2011-03-22 13:12:22 +01001393struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1394 bdaddr_t *bdaddr)
1395{
1396 struct oob_data *data;
1397
1398 list_for_each_entry(data, &hdev->remote_oob_data, list)
1399 if (bacmp(bdaddr, &data->bdaddr) == 0)
1400 return data;
1401
1402 return NULL;
1403}
1404
1405int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1406{
1407 struct oob_data *data;
1408
1409 data = hci_find_remote_oob_data(hdev, bdaddr);
1410 if (!data)
1411 return -ENOENT;
1412
1413 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1414
1415 list_del(&data->list);
1416 kfree(data);
1417
1418 return 0;
1419}
1420
1421int hci_remote_oob_data_clear(struct hci_dev *hdev)
1422{
1423 struct oob_data *data, *n;
1424
1425 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1426 list_del(&data->list);
1427 kfree(data);
1428 }
1429
1430 return 0;
1431}
1432
1433int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1434 u8 *randomizer)
1435{
1436 struct oob_data *data;
1437
1438 data = hci_find_remote_oob_data(hdev, bdaddr);
1439
1440 if (!data) {
1441 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1442 if (!data)
1443 return -ENOMEM;
1444
1445 bacpy(&data->bdaddr, bdaddr);
1446 list_add(&data->list, &hdev->remote_oob_data);
1447 }
1448
1449 memcpy(data->hash, hash, sizeof(data->hash));
1450 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1451
1452 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1453
1454 return 0;
1455}
1456
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1458 bdaddr_t *bdaddr)
1459{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001460 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001461
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001462 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001463 if (bacmp(bdaddr, &b->bdaddr) == 0)
1464 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001465
1466 return NULL;
1467}
1468
1469int hci_blacklist_clear(struct hci_dev *hdev)
1470{
1471 struct list_head *p, *n;
1472
1473 list_for_each_safe(p, n, &hdev->blacklist) {
1474 struct bdaddr_list *b;
1475
1476 b = list_entry(p, struct bdaddr_list, list);
1477
1478 list_del(p);
1479 kfree(b);
1480 }
1481
1482 return 0;
1483}
1484
1485int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1486{
1487 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488
1489 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1490 return -EBADF;
1491
Antti Julku5e762442011-08-25 16:48:02 +03001492 if (hci_blacklist_lookup(hdev, bdaddr))
1493 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001494
1495 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001496 if (!entry)
1497 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001498
1499 bacpy(&entry->bdaddr, bdaddr);
1500
1501 list_add(&entry->list, &hdev->blacklist);
1502
Johan Hedberg744cf192011-11-08 20:40:14 +02001503 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504}
1505
1506int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1507{
1508 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509
Szymon Janc1ec918c2011-11-16 09:32:21 +01001510 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001511 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001512
1513 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001514 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001515 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001516
1517 list_del(&entry->list);
1518 kfree(entry);
1519
Johan Hedberg744cf192011-11-08 20:40:14 +02001520 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001521}
1522
/* Delayed work that expires the LE advertising cache: drops all
 * collected advertising entries under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1534
Andre Guedes76c86862011-05-26 16:23:50 -03001535int hci_adv_entries_clear(struct hci_dev *hdev)
1536{
1537 struct adv_entry *entry, *tmp;
1538
1539 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1540 list_del(&entry->list);
1541 kfree(entry);
1542 }
1543
1544 BT_DBG("%s adv cache cleared", hdev->name);
1545
1546 return 0;
1547}
1548
1549struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1550{
1551 struct adv_entry *entry;
1552
1553 list_for_each_entry(entry, &hdev->adv_entries, list)
1554 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1555 return entry;
1556
1557 return NULL;
1558}
1559
1560static inline int is_connectable_adv(u8 evt_type)
1561{
1562 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1563 return 1;
1564
1565 return 0;
1566}
1567
/* Cache an LE advertising report.  Only connectable advertising
 * events are stored, and each address is cached at most once.
 * Returns 0 on success (including the already-cached case), -EINVAL
 * for non-connectable events and -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
			batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1595
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596/* Register HCI device */
1597int hci_register_dev(struct hci_dev *hdev)
1598{
1599 struct list_head *head = &hci_dev_list, *p;
Mat Martineau08add512011-11-02 16:18:36 -07001600 int i, id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601
David Herrmanne9b9cfa2012-01-07 15:47:22 +01001602 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603
David Herrmann010666a2012-01-07 15:47:07 +01001604 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605 return -EINVAL;
1606
Mat Martineau08add512011-11-02 16:18:36 -07001607 /* Do not allow HCI_AMP devices to register at index 0,
1608 * so the index can be used as the AMP controller ID.
1609 */
1610 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1611
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001612 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613
1614 /* Find first available device id */
1615 list_for_each(p, &hci_dev_list) {
1616 if (list_entry(p, struct hci_dev, list)->id != id)
1617 break;
1618 head = p; id++;
1619 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001620
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621 sprintf(hdev->name, "hci%d", id);
1622 hdev->id = id;
Andrei Emeltchenkoc6feeb22011-11-16 17:30:20 +02001623 list_add_tail(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001625 mutex_init(&hdev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626
1627 hdev->flags = 0;
Andre Guedesd23264a2011-11-25 20:53:38 -03001628 hdev->dev_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001630 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001632 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
Marcel Holtmann04837f62006-07-03 10:02:33 +02001634 hdev->idle_timeout = 0;
1635 hdev->sniff_max_interval = 800;
1636 hdev->sniff_min_interval = 80;
1637
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001638 INIT_WORK(&hdev->rx_work, hci_rx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001639 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001640 INIT_WORK(&hdev->tx_work, hci_tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001641
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642
1643 skb_queue_head_init(&hdev->rx_q);
1644 skb_queue_head_init(&hdev->cmd_q);
1645 skb_queue_head_init(&hdev->raw_q);
1646
Ville Tervo6bd32322011-02-16 16:32:41 +02001647 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1648
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301649 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001650 hdev->reassembly[i] = NULL;
1651
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001653 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654
Johan Hedberg30883512012-01-04 14:16:21 +02001655 discovery_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656
1657 hci_conn_hash_init(hdev);
1658
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001659 INIT_LIST_HEAD(&hdev->mgmt_pending);
1660
David Millerea4bd8b2010-07-30 21:54:49 -07001661 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001662
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001663 INIT_LIST_HEAD(&hdev->uuids);
1664
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001665 INIT_LIST_HEAD(&hdev->link_keys);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001666 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001667
Szymon Janc2763eda2011-03-22 13:12:22 +01001668 INIT_LIST_HEAD(&hdev->remote_oob_data);
1669
Andre Guedes76c86862011-05-26 16:23:50 -03001670 INIT_LIST_HEAD(&hdev->adv_entries);
1671
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001672 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001673 INIT_WORK(&hdev->power_on, hci_power_on);
Johan Hedberg32435532011-11-07 22:16:04 +02001674 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001675
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001676 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1677
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1679
1680 atomic_set(&hdev->promisc, 0);
1681
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001682 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001684 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1685 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001686 if (!hdev->workqueue) {
1687 error = -ENOMEM;
1688 goto err;
1689 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001690
David Herrmann33ca9542011-10-08 14:58:49 +02001691 error = hci_add_sysfs(hdev);
1692 if (error < 0)
1693 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001695 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1696 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1697 if (hdev->rfkill) {
1698 if (rfkill_register(hdev->rfkill) < 0) {
1699 rfkill_destroy(hdev->rfkill);
1700 hdev->rfkill = NULL;
1701 }
1702 }
1703
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001704 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1705 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001706 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001707
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001709 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
1711 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001712
David Herrmann33ca9542011-10-08 14:58:49 +02001713err_wqueue:
1714 destroy_workqueue(hdev->workqueue);
1715err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001716 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001717 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001718 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001719
David Herrmann33ca9542011-10-08 14:58:49 +02001720 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721}
1722EXPORT_SYMBOL(hci_register_dev);
1723
/* Unregister HCI device.
 *
 * Tears down what hci_register_dev() set up: unlink from the global
 * device list, close the device, free reassembly buffers, notify mgmt
 * and the notifier chain, drop rfkill and sysfs entries, flush delayed
 * work, destroy the workqueue, clear all per-device lists and finally
 * drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new users can look
	 * the device up. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb() is a no-op for slots that are still NULL */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Skip the mgmt notification while the device is still in the
	 * HCI_INIT or HCI_SETUP phase */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all remaining per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_dev_hold() in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1776
/* Suspend HCI device.
 * Only broadcasts HCI_DEV_SUSPEND to the notifier chain; no device
 * state is changed here.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1784
/* Resume HCI device.
 * Only broadcasts HCI_DEV_RESUME to the notifier chain; no device
 * state is changed here.  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1792
/* Receive frame from HCI drivers.
 * Takes ownership of @skb (freed on rejection).  The frame is
 * timestamped, queued on rx_q and processed asynchronously by the
 * RX work item.  Returns 0 or -ENXIO when the device is neither up
 * nor initializing. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer processing to the RX work item */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1815
/* Reassemble a full HCI packet from driver-provided byte chunks.
 *
 * @hdev:  device the data came from
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  chunk of raw bytes
 * @count: number of bytes available in @data
 * @index: slot in hdev->reassembly[] holding partial state between calls
 *
 * Returns the number of unconsumed bytes from @data (>= 0), -EILSEQ
 * for a bad type/index, or -ENOMEM on allocation/tailroom failure.
 * A completed packet is handed off to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial packet in this slot: start a new skb sized
		 * for the worst case of this packet type. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed;
		 * the header must arrive first. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Never copy more than the bytes still expected */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, take the payload length
		 * from it and extend the expectation accordingly. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1924
Marcel Holtmannef222012007-07-11 06:42:04 +02001925int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1926{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301927 int rem = 0;
1928
Marcel Holtmannef222012007-07-11 06:42:04 +02001929 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1930 return -EILSEQ;
1931
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001932 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001933 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301934 if (rem < 0)
1935 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001936
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301937 data += (count - rem);
1938 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001939 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001940
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301941 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001942}
1943EXPORT_SYMBOL(hci_recv_fragment);
1944
Suraj Sumangala99811512010-07-14 13:02:19 +05301945#define STREAM_REASSEMBLY 0
1946
1947int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1948{
1949 int type;
1950 int rem = 0;
1951
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001952 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301953 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1954
1955 if (!skb) {
1956 struct { char type; } *pkt;
1957
1958 /* Start of the frame */
1959 pkt = data;
1960 type = pkt->type;
1961
1962 data++;
1963 count--;
1964 } else
1965 type = bt_cb(skb)->pkt_type;
1966
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001967 rem = hci_reassembly(hdev, type, data, count,
1968 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301969 if (rem < 0)
1970 return rem;
1971
1972 data += (count - rem);
1973 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001974 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301975
1976 return rem;
1977}
1978EXPORT_SYMBOL(hci_recv_stream_fragment);
1979
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980/* ---- Interface to upper protocols ---- */
1981
/* Register an upper-protocol callback structure on the global
 * hci_cb_list.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1993
/* Remove a previously registered callback structure from the global
 * hci_cb_list.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2005
/* Hand one frame to the driver's send callback.  Consumes the skb.
 * In promiscuous mode the frame is timestamped and also delivered to
 * HCI sockets before transmission.  Returns the driver's result or
 * -ENODEV when the skb carries no device. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Deliver to HCI sockets as well */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2029
/* Send HCI command.
 *
 * Builds a command packet (command header followed by @plen bytes of
 * @param), queues it on cmd_q and kicks the command work item.
 * Returns 0 on success or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065
2066/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002067void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068{
2069 struct hci_command_hdr *hdr;
2070
2071 if (!hdev->sent_cmd)
2072 return NULL;
2073
2074 hdr = (void *) hdev->sent_cmd->data;
2075
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002076 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 return NULL;
2078
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002079 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080
2081 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2082}
2083
2084/* Send ACL data */
/* Prepend an ACL header (packed handle+flags, payload length) to @skb.
 * Assumes the skb has at least HCI_ACL_HDR_SIZE bytes of headroom. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	/* Reserve header space and point the transport header at it */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2096
/* Queue an ACL skb (possibly carrying additional fragments in its
 * frag_list) onto @queue.  Each trailing fragment gets its own ACL
 * header with ACL_START replaced by ACL_CONT, and the whole chain is
 * appended atomically under the queue lock. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not repeat ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2137
/* Send ACL data on a channel: add the ACL header, queue the packet
 * (and any fragments) on the channel's data queue and kick the TX
 * work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2154
/* Send SCO data: prepend the SCO header (connection handle + payload
 * length), queue on the connection's data queue and kick the TX work
 * item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the header in the skb's headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2177
2178/* ---- HCI TX task (outgoing data) ---- */
2179
/* HCI Connection scheduler.
 *
 * Pick the connection of @type that has queued data and the fewest
 * in-flight packets, and compute its fair-share quota: the matching
 * controller buffer count divided by the number of eligible
 * connections, minimum 1.  *quote is 0 when nothing is eligible.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Favour the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2239
/* TX timeout on links of @type: disconnect every connection that
 * still has unacked packets so its buffers can be reclaimed. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2260
/* Channel scheduler for links of @type.
 *
 * Among connections in BT_CONNECTED/BT_CONFIG state, only channels
 * whose head skb carries the highest pending priority compete; of
 * those, the channel on the least-busy connection (lowest conn->sent)
 * wins.  *quote is set to that connection's fair share of the
 * matching buffer pool (minimum 1).  Returns NULL when no channel
 * has data queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the highest pending priority competes */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-busy connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2339
/* Starvation relief after a scheduling round: for every channel of
 * @type that sent nothing this round (chan->sent == 0), raise the
 * priority of its head skb to HCI_PRIO_MAX - 1 so it can compete
 * with high-priority traffic next round.  Channels that were
 * serviced just get their per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2389
/* Number of controller data blocks consumed by one ACL packet.
 * Only the payload counts: the ACL header size is subtracted before
 * rounding up to whole blocks. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2395
/* ACL flow-control watchdog: if the controller has no free buffers
 * left (@cnt == 0) and HCI_ACL_TX_TIMEOUT has elapsed since the last
 * ACL transmission, tear down the stalled ACL links.  Skipped for
 * HCI_RAW devices. */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406
/* Packet-based ACL scheduler: while controller ACL buffers (acl_cnt)
 * remain, transmit up to each selected channel's quota, stopping a
 * channel early when a lower-priority skb reaches the head of its
 * queue.  Afterwards let starved channels catch up. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Only recalculate priorities when something was sent */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2444
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002445static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2446{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002447 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002448 struct hci_chan *chan;
2449 struct sk_buff *skb;
2450 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002451
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002452 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002453
2454 while (hdev->block_cnt > 0 &&
2455 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2456 u32 priority = (skb_peek(&chan->data_q))->priority;
2457 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2458 int blocks;
2459
2460 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2461 skb->len, skb->priority);
2462
2463 /* Stop if priority has changed */
2464 if (skb->priority < priority)
2465 break;
2466
2467 skb = skb_dequeue(&chan->data_q);
2468
2469 blocks = __get_blocks(hdev, skb);
2470 if (blocks > hdev->block_cnt)
2471 return;
2472
2473 hci_conn_enter_active_mode(chan->conn,
2474 bt_cb(skb)->force_active);
2475
2476 hci_send_frame(skb);
2477 hdev->acl_last_tx = jiffies;
2478
2479 hdev->block_cnt -= blocks;
2480 quote -= blocks;
2481
2482 chan->sent += blocks;
2483 chan->conn->sent += blocks;
2484 }
2485 }
2486
2487 if (cnt != hdev->block_cnt)
2488 hci_prio_recalculate(hdev, ACL_LINK);
2489}
2490
2491static inline void hci_sched_acl(struct hci_dev *hdev)
2492{
2493 BT_DBG("%s", hdev->name);
2494
2495 if (!hci_conn_num(hdev, ACL_LINK))
2496 return;
2497
2498 switch (hdev->flow_ctl_mode) {
2499 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2500 hci_sched_acl_pkt(hdev);
2501 break;
2502
2503 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2504 hci_sched_acl_blk(hdev);
2505 break;
2506 }
2507}
2508
/* Schedule SCO: drain up to each connection's quota while SCO
 * buffers (sco_cnt) are available. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection counter before overflow */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2532
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002533static inline void hci_sched_esco(struct hci_dev *hdev)
2534{
2535 struct hci_conn *conn;
2536 struct sk_buff *skb;
2537 int quote;
2538
2539 BT_DBG("%s", hdev->name);
2540
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002541 if (!hci_conn_num(hdev, ESCO_LINK))
2542 return;
2543
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002544 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2545 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2546 BT_DBG("skb %p len %d", skb, skb->len);
2547 hci_send_frame(skb);
2548
2549 conn->sent++;
2550 if (conn->sent == ~0)
2551 conn->sent = 0;
2552 }
2553 }
2554}
2555
/* Schedule LE data transmission.
 *
 * Drains queued LE frames, highest-priority channel first, while buffer
 * credits remain.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credit pool, so the leftover credit count
 * is written back to le_cnt or acl_cnt accordingly.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, otherwise borrow ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember the starting credits to detect activity below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek passed the priority check; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to the pool they were taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2606
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002607static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002608{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002609 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 struct sk_buff *skb;
2611
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002612 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2613 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 /* Schedule queues and send stuff to HCI driver */
2616
2617 hci_sched_acl(hdev);
2618
2619 hci_sched_sco(hdev);
2620
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002621 hci_sched_esco(hdev);
2622
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002623 hci_sched_le(hdev);
2624
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 /* Send next queued raw (unknown type) packet */
2626 while ((skb = skb_dequeue(&hdev->raw_q)))
2627 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628}
2629
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002630/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631
2632/* ACL data packet */
2633static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2634{
2635 struct hci_acl_hdr *hdr = (void *) skb->data;
2636 struct hci_conn *conn;
2637 __u16 handle, flags;
2638
2639 skb_pull(skb, HCI_ACL_HDR_SIZE);
2640
2641 handle = __le16_to_cpu(hdr->handle);
2642 flags = hci_flags(handle);
2643 handle = hci_handle(handle);
2644
2645 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2646
2647 hdev->stat.acl_rx++;
2648
2649 hci_dev_lock(hdev);
2650 conn = hci_conn_hash_lookup_handle(hdev, handle);
2651 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002654 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002655
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002657 l2cap_recv_acldata(conn, skb, flags);
2658 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002660 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661 hdev->name, handle);
2662 }
2663
2664 kfree_skb(skb);
2665}
2666
2667/* SCO data packet */
2668static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2669{
2670 struct hci_sco_hdr *hdr = (void *) skb->data;
2671 struct hci_conn *conn;
2672 __u16 handle;
2673
2674 skb_pull(skb, HCI_SCO_HDR_SIZE);
2675
2676 handle = __le16_to_cpu(hdr->handle);
2677
2678 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2679
2680 hdev->stat.sco_rx++;
2681
2682 hci_dev_lock(hdev);
2683 conn = hci_conn_hash_lookup_handle(hdev, handle);
2684 hci_dev_unlock(hdev);
2685
2686 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002688 sco_recv_scodata(conn, skb);
2689 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002691 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 hdev->name, handle);
2693 }
2694
2695 kfree_skb(skb);
2696}
2697
/* RX work handler: drain hdev->rx_q and dispatch each frame by packet
 * type (event / ACL / SCO).  Runs from the hdev workqueue.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			/* Raw mode: userspace owns the device, drop here */
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
2749
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002750static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002752 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 struct sk_buff *skb;
2754
2755 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002758 if (atomic_read(&hdev->cmd_cnt)) {
2759 skb = skb_dequeue(&hdev->cmd_q);
2760 if (!skb)
2761 return;
2762
Wei Yongjun7585b972009-02-25 18:29:52 +08002763 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002765 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2766 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 atomic_dec(&hdev->cmd_cnt);
2768 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002769 if (test_bit(HCI_RESET, &hdev->flags))
2770 del_timer(&hdev->cmd_timer);
2771 else
2772 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002773 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 } else {
2775 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002776 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 }
2778 }
2779}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002780
2781int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2782{
2783 /* General inquiry access code (GIAC) */
2784 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2785 struct hci_cp_inquiry cp;
2786
2787 BT_DBG("%s", hdev->name);
2788
2789 if (test_bit(HCI_INQUIRY, &hdev->flags))
2790 return -EINPROGRESS;
2791
Johan Hedberg46632622012-01-02 16:06:08 +02002792 inquiry_cache_flush(hdev);
2793
Andre Guedes2519a1f2011-11-07 11:45:24 -03002794 memset(&cp, 0, sizeof(cp));
2795 memcpy(&cp.lap, lap, sizeof(cp.lap));
2796 cp.length = length;
2797
2798 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2799}
Andre Guedes023d5042011-11-04 14:16:52 -03002800
2801int hci_cancel_inquiry(struct hci_dev *hdev)
2802{
2803 BT_DBG("%s", hdev->name);
2804
2805 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2806 return -EPERM;
2807
2808 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2809}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002810
/* Module parameter: toggles Bluetooth High Speed support; 0644 makes it
 * readable by everyone and writable by root via sysfs.
 */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");