/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Fabio Estevam8b281b92012-01-10 18:33:50 -020058bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020059
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
Alan Sterne041c682006-03-27 01:16:30 -080079 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
Marcel Holtmann65164552005-10-28 19:20:48 +020087static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070088{
Alan Sterne041c682006-03-27 01:16:30 -080089 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100124 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700146 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157
Johan Hedberga5040ef2011-01-10 13:28:59 +0200158 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300186 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200190static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200192 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800193 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200194 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Mandatory initialization */
199
200 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200209 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
224 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200230 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700234 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240}
241
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200242static void amp_init(struct hci_dev *hdev)
243{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
Andre Guedes6fbe1952012-02-03 17:47:58 -0300363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200367 return true;
368
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 default:
370 return false;
371 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200372}
373
Johan Hedbergff9ef572012-01-04 14:23:45 +0200374void hci_discovery_set_state(struct hci_dev *hdev, int state)
375{
376 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
377
378 if (hdev->discovery.state == state)
379 return;
380
381 switch (state) {
382 case DISCOVERY_STOPPED:
383 mgmt_discovering(hdev, 0);
384 break;
385 case DISCOVERY_STARTING:
386 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200387 case DISCOVERY_INQUIRY:
Andre Guedesc5990082012-02-03 17:47:57 -0300388 case DISCOVERY_LE_SCAN:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200389 mgmt_discovering(hdev, 1);
390 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200391 case DISCOVERY_RESOLVING:
392 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 case DISCOVERY_STOPPING:
394 break;
395 }
396
397 hdev->discovery.state = state;
398}
399
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400static void inquiry_cache_flush(struct hci_dev *hdev)
401{
Johan Hedberg30883512012-01-04 14:16:21 +0200402 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200403 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404
Johan Hedberg561aafb2012-01-04 13:31:59 +0200405 list_for_each_entry_safe(p, n, &cache->all, all) {
406 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409
410 INIT_LIST_HEAD(&cache->unknown);
411 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200412 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413}
414
415struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
416{
Johan Hedberg30883512012-01-04 14:16:21 +0200417 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
Johan Hedberg561aafb2012-01-04 13:31:59 +0200422 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200424 return e;
425 }
426
427 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428}
429
Johan Hedberg561aafb2012-01-04 13:31:59 +0200430struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
431 bdaddr_t *bdaddr)
432{
Johan Hedberg30883512012-01-04 14:16:21 +0200433 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200434 struct inquiry_entry *e;
435
436 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
437
438 list_for_each_entry(e, &cache->unknown, list) {
439 if (!bacmp(&e->data.bdaddr, bdaddr))
440 return e;
441 }
442
443 return NULL;
444}
445
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200446struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
447 bdaddr_t *bdaddr,
448 int state)
449{
450 struct discovery_state *cache = &hdev->discovery;
451 struct inquiry_entry *e;
452
453 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
454
455 list_for_each_entry(e, &cache->resolve, list) {
456 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
457 return e;
458 if (!bacmp(&e->data.bdaddr, bdaddr))
459 return e;
460 }
461
462 return NULL;
463}
464
Johan Hedberga3d4e202012-01-09 00:53:02 +0200465void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
466 struct inquiry_entry *ie)
467{
468 struct discovery_state *cache = &hdev->discovery;
469 struct list_head *pos = &cache->resolve;
470 struct inquiry_entry *p;
471
472 list_del(&ie->list);
473
474 list_for_each_entry(p, &cache->resolve, list) {
475 if (p->name_state != NAME_PENDING &&
476 abs(p->data.rssi) >= abs(ie->data.rssi))
477 break;
478 pos = &p->list;
479 }
480
481 list_add(&ie->list, pos);
482}
483
Johan Hedberg31754052012-01-04 13:39:52 +0200484bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg561aafb2012-01-04 13:31:59 +0200485 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486{
Johan Hedberg30883512012-01-04 14:16:21 +0200487 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200488 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489
490 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
491
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200492 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200493 if (ie) {
494 if (ie->name_state == NAME_NEEDED &&
495 data->rssi != ie->data.rssi) {
496 ie->data.rssi = data->rssi;
497 hci_inquiry_cache_update_resolve(hdev, ie);
498 }
499
Johan Hedberg561aafb2012-01-04 13:31:59 +0200500 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200501 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200502
Johan Hedberg561aafb2012-01-04 13:31:59 +0200503 /* Entry not in the cache. Add new one. */
504 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
505 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200506 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200507
508 list_add(&ie->all, &cache->all);
509
510 if (name_known) {
511 ie->name_state = NAME_KNOWN;
512 } else {
513 ie->name_state = NAME_NOT_KNOWN;
514 list_add(&ie->list, &cache->unknown);
515 }
516
517update:
518 if (name_known && ie->name_state != NAME_KNOWN &&
519 ie->name_state != NAME_PENDING) {
520 ie->name_state = NAME_KNOWN;
521 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700522 }
523
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200524 memcpy(&ie->data, data, sizeof(*data));
525 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200527
528 if (ie->name_state == NAME_NOT_KNOWN)
529 return false;
530
531 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532}
533
534static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
535{
Johan Hedberg30883512012-01-04 14:16:21 +0200536 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 struct inquiry_info *info = (struct inquiry_info *) buf;
538 struct inquiry_entry *e;
539 int copied = 0;
540
Johan Hedberg561aafb2012-01-04 13:31:59 +0200541 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200543
544 if (copied >= num)
545 break;
546
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 bacpy(&info->bdaddr, &data->bdaddr);
548 info->pscan_rep_mode = data->pscan_rep_mode;
549 info->pscan_period_mode = data->pscan_period_mode;
550 info->pscan_mode = data->pscan_mode;
551 memcpy(info->dev_class, data->dev_class, 3);
552 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200553
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200555 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 }
557
558 BT_DBG("cache %p, copied %d", cache, copied);
559 return copied;
560}
561
562static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
563{
564 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
565 struct hci_cp_inquiry cp;
566
567 BT_DBG("%s", hdev->name);
568
569 if (test_bit(HCI_INQUIRY, &hdev->flags))
570 return;
571
572 /* Start Inquiry */
573 memcpy(&cp.lap, &ir->lap, 3);
574 cp.length = ir->length;
575 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200576 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577}
578
579int hci_inquiry(void __user *arg)
580{
581 __u8 __user *ptr = arg;
582 struct hci_inquiry_req ir;
583 struct hci_dev *hdev;
584 int err = 0, do_inquiry = 0, max_rsp;
585 long timeo;
586 __u8 *buf;
587
588 if (copy_from_user(&ir, ptr, sizeof(ir)))
589 return -EFAULT;
590
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200591 hdev = hci_dev_get(ir.dev_id);
592 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700593 return -ENODEV;
594
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300595 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900596 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200597 inquiry_cache_empty(hdev) ||
598 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700599 inquiry_cache_flush(hdev);
600 do_inquiry = 1;
601 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300602 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700603
Marcel Holtmann04837f62006-07-03 10:02:33 +0200604 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200605
606 if (do_inquiry) {
607 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
608 if (err < 0)
609 goto done;
610 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700611
612 /* for unlimited number of responses we will use buffer with 255 entries */
613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200619 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 err = -ENOMEM;
621 goto done;
622 }
623
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300624 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300626 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
633 ir.num_rsp))
634 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900635 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 err = -EFAULT;
637
638 kfree(buf);
639
640done:
641 hci_dev_put(hdev);
642 return err;
643}
644
645/* ---- HCI ioctl helpers ---- */
646
647int hci_dev_open(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int ret = 0;
651
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200652 hdev = hci_dev_get(dev);
653 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
663 }
664
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
668 }
669
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
672
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100676 set_bit(HCI_RAW, &hdev->flags);
677
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
681 }
682
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200686 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
Marcel Holtmann04837f62006-07-03 10:02:33 +0200688 ret = __hci_request(hdev, hci_init_req, 0,
689 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690
Andre Guedeseead27d2011-06-30 19:20:55 -0300691 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300692 ret = __hci_request(hdev, hci_le_init_req, 0,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695 clear_bit(HCI_INIT, &hdev->flags);
696 }
697
698 if (!ret) {
699 hci_dev_hold(hdev);
700 set_bit(HCI_UP, &hdev->flags);
701 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200702 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300703 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200704 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300705 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200706 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900707 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200709 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200710 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400711 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
715
716 if (hdev->flush)
717 hdev->flush(hdev);
718
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
722 }
723
724 hdev->close(hdev);
725 hdev->flags = 0;
726 }
727
728done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
732}
733
734static int hci_dev_do_close(struct hci_dev *hdev)
735{
736 BT_DBG("%s %p", hdev->name, hdev);
737
738 hci_req_cancel(hdev, ENODEV);
739 hci_req_lock(hdev);
740
741 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300742 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743 hci_req_unlock(hdev);
744 return 0;
745 }
746
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200747 /* Flush RX and TX works */
748 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400749 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200751 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200752 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200753 hdev->discov_timeout = 0;
754 }
755
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200756 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200757 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200758
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200759 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200760 cancel_delayed_work(&hdev->service_cache);
761
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300762 cancel_delayed_work_sync(&hdev->le_scan_disable);
763
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300764 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700765 inquiry_cache_flush(hdev);
766 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300767 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768
769 hci_notify(hdev, HCI_DEV_DOWN);
770
771 if (hdev->flush)
772 hdev->flush(hdev);
773
774 /* Reset device */
775 skb_queue_purge(&hdev->cmd_q);
776 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200777 if (!test_bit(HCI_RAW, &hdev->flags) &&
778 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700779 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200780 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200781 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 clear_bit(HCI_INIT, &hdev->flags);
783 }
784
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200785 /* flush cmd work */
786 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787
788 /* Drop queues */
789 skb_queue_purge(&hdev->rx_q);
790 skb_queue_purge(&hdev->cmd_q);
791 skb_queue_purge(&hdev->raw_q);
792
793 /* Drop last sent command */
794 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300795 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 kfree_skb(hdev->sent_cmd);
797 hdev->sent_cmd = NULL;
798 }
799
800 /* After this point our queues are empty
801 * and no tasks are scheduled. */
802 hdev->close(hdev);
803
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300804 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200805 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300806 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200807
Linus Torvalds1da177e2005-04-16 15:20:36 -0700808 /* Clear flags */
809 hdev->flags = 0;
810
811 hci_req_unlock(hdev);
812
813 hci_dev_put(hdev);
814 return 0;
815}
816
817int hci_dev_close(__u16 dev)
818{
819 struct hci_dev *hdev;
820 int err;
821
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200822 hdev = hci_dev_get(dev);
823 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700824 return -ENODEV;
825 err = hci_dev_do_close(hdev);
826 hci_dev_put(hdev);
827 return err;
828}
829
/* Soft-reset a running HCI device.
 * Purges pending traffic, flushes the inquiry cache and connection hash,
 * resets the host-side flow-control counters and, unless the device is in
 * raw mode, sends an HCI Reset to the controller. A device that is not
 * HCI_UP is left untouched. Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other HCI requests on this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm command flow control and clear per-link-type credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
868
869int hci_dev_reset_stat(__u16 dev)
870{
871 struct hci_dev *hdev;
872 int ret = 0;
873
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200874 hdev = hci_dev_get(dev);
875 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 return -ENODEV;
877
878 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
879
880 hci_dev_put(hdev);
881
882 return ret;
883}
884
/* Handle the per-device HCISET* ioctls coming from the HCI socket layer.
 * Copies a struct hci_dev_req from userspace, resolves the target device
 * and applies the requested setting — either by issuing an HCI request to
 * the controller (AUTH/ENCRYPT/SCAN/LINKPOL) or by updating host-side
 * state directly (LINKMODE/PTYPE/MTU). Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Host-side state only; no command goes to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed __u16 values:
		 * half [1] = MTU, half [0] = packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
959
/* Handle the HCIGETDEVLIST ioctl: report the id and flags of up to
 * dev_num registered controllers to userspace.
 * Returns 0, -EFAULT, -EINVAL (bad count) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel buffer to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is enumerating devices, so cancel any pending
		 * automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Without the mgmt interface, default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1006
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for one
 * device and copy it back to userspace.
 * Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: cancel any pending auto power-off
	 * (synchronously, since we hold a device reference here) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without the mgmt interface, default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1048
1049/* ---- Interface to HCI drivers ---- */
1050
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001051static int hci_rfkill_set_block(void *data, bool blocked)
1052{
1053 struct hci_dev *hdev = data;
1054
1055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1056
1057 if (!blocked)
1058 return 0;
1059
1060 hci_dev_do_close(hdev);
1061
1062 return 0;
1063}
1064
1065static const struct rfkill_ops hci_rfkill_ops = {
1066 .set_block = hci_rfkill_set_block,
1067};
1068
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* kzalloc: every field (flags, counters, pointers) starts zeroed */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	/* Caller owns the device; release with hci_free_dev() */
	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1084
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1094
/* Work item: power on a device on behalf of the management interface.
 * Opens the device; if HCI_AUTO_OFF is set, arms a delayed power-off so
 * an unused device is turned back off after AUTO_OFF_TIMEOUT. On the
 * first successful power-up (HCI_SETUP still set) the new controller is
 * announced to userspace via mgmt_index_added().
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1111
/* Delayed work item: automatic power-off of an idle device.
 * Clears HCI_AUTO_OFF first so the close path does not treat this as a
 * user-requested shutdown differently, then closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1123
/* Delayed work item: discoverable-mode timeout.
 * Writes Scan Enable = SCAN_PAGE (page scan only, no inquiry scan), which
 * ends the discoverable period, and clears the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1141
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001142int hci_uuids_clear(struct hci_dev *hdev)
1143{
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->uuids) {
1147 struct bt_uuid *uuid;
1148
1149 uuid = list_entry(p, struct bt_uuid, list);
1150
1151 list_del(p);
1152 kfree(uuid);
1153 }
1154
1155 return 0;
1156}
1157
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001158int hci_link_keys_clear(struct hci_dev *hdev)
1159{
1160 struct list_head *p, *n;
1161
1162 list_for_each_safe(p, n, &hdev->link_keys) {
1163 struct link_key *key;
1164
1165 key = list_entry(p, struct link_key, list);
1166
1167 list_del(p);
1168 kfree(key);
1169 }
1170
1171 return 0;
1172}
1173
/* Remove and free every stored SMP Long Term Key on this device.
 * Safe iteration is required because entries are freed while walking.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1185
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001186struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001188 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001189
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001190 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001191 if (bacmp(bdaddr, &k->bdaddr) == 0)
1192 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001193
1194 return NULL;
1195}
1196
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 when the key may outlive the current connection, 0 when it
 * must be discarded after the connection drops. @conn may be NULL
 * (security mode 3 style key notification without a connection object).
 * The order of checks is significant: earlier rules override later ones.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1232
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001233struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 list_for_each_entry(k, &hdev->long_term_keys, list) {
1238 if (k->ediv != ediv ||
1239 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240 continue;
1241
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001242 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243 }
1244
1245 return NULL;
1246}
1247EXPORT_SYMBOL(hci_find_ltk);
1248
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001249struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1250 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001253
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254 list_for_each_entry(k, &hdev->long_term_keys, list)
1255 if (addr_type == k->bdaddr_type &&
1256 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257 return k;
1258
1259 return NULL;
1260}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001261EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262
/* Create or update the stored link key for @bdaddr.
 * @conn may be NULL (key notification without a connection object).
 * When @new_key is set, mgmt is notified and hci_persistent_key()
 * decides whether the key is kept after this call; a non-persistent key
 * is stored only long enough for the notification and then freed.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys only live long enough for the mgmt event */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1317
/* Store an SMP Short Term Key or Long Term Key for @bdaddr/@addr_type.
 * Key types that are neither STK nor LTK are silently ignored. An
 * existing entry for the same address is updated in place. When
 * @new_key is set and the key is an LTK, userspace is notified via
 * mgmt_new_ltk(). Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are reported to userspace; STKs are not */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1354
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001355int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1356{
1357 struct link_key *key;
1358
1359 key = hci_find_link_key(hdev, bdaddr);
1360 if (!key)
1361 return -ENOENT;
1362
1363 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1364
1365 list_del(&key->list);
1366 kfree(key);
1367
1368 return 0;
1369}
1370
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001371int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372{
1373 struct smp_ltk *k, *tmp;
1374
1375 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1376 if (bacmp(bdaddr, &k->bdaddr))
1377 continue;
1378
1379 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1380
1381 list_del(&k->list);
1382 kfree(k);
1383 }
1384
1385 return 0;
1386}
1387
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never acknowledged the last command; restore one
	 * command credit and kick cmd_work so the queue is not stuck. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1397
Szymon Janc2763eda2011-03-22 13:12:22 +01001398struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1399 bdaddr_t *bdaddr)
1400{
1401 struct oob_data *data;
1402
1403 list_for_each_entry(data, &hdev->remote_oob_data, list)
1404 if (bacmp(bdaddr, &data->bdaddr) == 0)
1405 return data;
1406
1407 return NULL;
1408}
1409
1410int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1411{
1412 struct oob_data *data;
1413
1414 data = hci_find_remote_oob_data(hdev, bdaddr);
1415 if (!data)
1416 return -ENOENT;
1417
1418 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1419
1420 list_del(&data->list);
1421 kfree(data);
1422
1423 return 0;
1424}
1425
1426int hci_remote_oob_data_clear(struct hci_dev *hdev)
1427{
1428 struct oob_data *data, *n;
1429
1430 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1431 list_del(&data->list);
1432 kfree(data);
1433 }
1434
1435 return 0;
1436}
1437
/* Store (or update) remote out-of-band pairing data for @bdaddr.
 * @hash and @randomizer are the simple-pairing OOB values received from
 * the remote side; an existing entry for the same address is overwritten.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* GFP_ATOMIC: may be called from a non-sleeping context */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1461
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1463 bdaddr_t *bdaddr)
1464{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001465 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001467 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001468 if (bacmp(bdaddr, &b->bdaddr) == 0)
1469 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001470
1471 return NULL;
1472}
1473
1474int hci_blacklist_clear(struct hci_dev *hdev)
1475{
1476 struct list_head *p, *n;
1477
1478 list_for_each_safe(p, n, &hdev->blacklist) {
1479 struct bdaddr_list *b;
1480
1481 b = list_entry(p, struct bdaddr_list, list);
1482
1483 list_del(p);
1484 kfree(b);
1485 }
1486
1487 return 0;
1488}
1489
/* Add @bdaddr to the device blacklist and notify the management
 * interface. BDADDR_ANY is rejected (-EBADF) and duplicates are
 * refused (-EEXIST). Returns the mgmt notification result, or a
 * negative errno on failure.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1510
/* Remove @bdaddr from the device blacklist and notify the management
 * interface. Passing BDADDR_ANY clears the whole blacklist instead.
 * Returns -ENOENT when the address is not blacklisted, otherwise the
 * mgmt notification result.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1527
/* Delayed work item: empty the LE advertising entry cache.
 * Takes the device lock because the cache is also accessed from the
 * event-processing path.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1539
Andre Guedes76c86862011-05-26 16:23:50 -03001540int hci_adv_entries_clear(struct hci_dev *hdev)
1541{
1542 struct adv_entry *entry, *tmp;
1543
1544 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1545 list_del(&entry->list);
1546 kfree(entry);
1547 }
1548
1549 BT_DBG("%s adv cache cleared", hdev->name);
1550
1551 return 0;
1552}
1553
1554struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1555{
1556 struct adv_entry *entry;
1557
1558 list_for_each_entry(entry, &hdev->adv_entries, list)
1559 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1560 return entry;
1561
1562 return NULL;
1563}
1564
1565static inline int is_connectable_adv(u8 evt_type)
1566{
1567 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1568 return 1;
1569
1570 return 0;
1571}
1572
/* Cache the sender of an LE advertising report.
 * Non-connectable event types are rejected (-EINVAL) and addresses
 * already cached are ignored, so each entry is added at most once.
 * Returns 0 on success (or duplicate) or -ENOMEM.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1600
/* HCI request callback: send LE Set Scan Parameters.
 * @opt carries a pointer to the struct le_scan_params prepared by
 * hci_do_le_scan() (type, interval and window, converted to LE16 for
 * the wire).
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1613
1614static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1615{
1616 struct hci_cp_le_set_scan_enable cp;
1617
1618 memset(&cp, 0, sizeof(cp));
1619 cp.enable = 1;
1620
1621 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1622}
1623
/* Start an LE scan with the given parameters and arm an automatic stop.
 * Issues LE Set Scan Parameters followed by LE Set Scan Enable under the
 * request lock, each with a 3 second command timeout, then schedules
 * le_scan_disable_work to stop scanning after @timeout milliseconds.
 * Returns 0, -EINPROGRESS if a scan is already running, or the request
 * error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	/* param lives on the stack; __hci_request completes synchronously
	 * before it goes out of scope */
	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
									timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1657
/* Delayed work item scheduled by hci_do_le_scan(): stop the LE scan.
 * The command parameters are all-zero, i.e. enable = 0 (scan disabled).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1670
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671/* Register HCI device */
1672int hci_register_dev(struct hci_dev *hdev)
1673{
1674 struct list_head *head = &hci_dev_list, *p;
Mat Martineau08add512011-11-02 16:18:36 -07001675 int i, id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001676
David Herrmanne9b9cfa2012-01-07 15:47:22 +01001677 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678
David Herrmann010666a2012-01-07 15:47:07 +01001679 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 return -EINVAL;
1681
Mat Martineau08add512011-11-02 16:18:36 -07001682 /* Do not allow HCI_AMP devices to register at index 0,
1683 * so the index can be used as the AMP controller ID.
1684 */
1685 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1686
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001687 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688
1689 /* Find first available device id */
1690 list_for_each(p, &hci_dev_list) {
1691 if (list_entry(p, struct hci_dev, list)->id != id)
1692 break;
1693 head = p; id++;
1694 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001695
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 sprintf(hdev->name, "hci%d", id);
1697 hdev->id = id;
Andrei Emeltchenkoc6feeb22011-11-16 17:30:20 +02001698 list_add_tail(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001700 mutex_init(&hdev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
1702 hdev->flags = 0;
Andre Guedesd23264a2011-11-25 20:53:38 -03001703 hdev->dev_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f9902007-07-11 09:51:55 +02001705 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001707 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
Marcel Holtmann04837f62006-07-03 10:02:33 +02001709 hdev->idle_timeout = 0;
1710 hdev->sniff_max_interval = 800;
1711 hdev->sniff_min_interval = 80;
1712
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001713 INIT_WORK(&hdev->rx_work, hci_rx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001714 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001715 INIT_WORK(&hdev->tx_work, hci_tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001716
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 skb_queue_head_init(&hdev->rx_q);
1719 skb_queue_head_init(&hdev->cmd_q);
1720 skb_queue_head_init(&hdev->raw_q);
1721
Ville Tervo6bd32322011-02-16 16:32:41 +02001722 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1723
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301724 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001725 hdev->reassembly[i] = NULL;
1726
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001728 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729
Johan Hedberg30883512012-01-04 14:16:21 +02001730 discovery_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 hci_conn_hash_init(hdev);
1733
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001734 INIT_LIST_HEAD(&hdev->mgmt_pending);
1735
David Millerea4bd8b2010-07-30 21:54:49 -07001736 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001737
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001738 INIT_LIST_HEAD(&hdev->uuids);
1739
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001740 INIT_LIST_HEAD(&hdev->link_keys);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001741 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001742
Szymon Janc2763eda2011-03-22 13:12:22 +01001743 INIT_LIST_HEAD(&hdev->remote_oob_data);
1744
Andre Guedes76c86862011-05-26 16:23:50 -03001745 INIT_LIST_HEAD(&hdev->adv_entries);
1746
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001747 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001748 INIT_WORK(&hdev->power_on, hci_power_on);
Johan Hedberg32435532011-11-07 22:16:04 +02001749 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001750
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001751 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1752
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1754
1755 atomic_set(&hdev->promisc, 0);
1756
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001757 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1758
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001759 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001761 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1762 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001763 if (!hdev->workqueue) {
1764 error = -ENOMEM;
1765 goto err;
1766 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001767
David Herrmann33ca9542011-10-08 14:58:49 +02001768 error = hci_add_sysfs(hdev);
1769 if (error < 0)
1770 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001772 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1773 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1774 if (hdev->rfkill) {
1775 if (rfkill_register(hdev->rfkill) < 0) {
1776 rfkill_destroy(hdev->rfkill);
1777 hdev->rfkill = NULL;
1778 }
1779 }
1780
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001781 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1782 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001783 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001784
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001786 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787
1788 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001789
David Herrmann33ca9542011-10-08 14:58:49 +02001790err_wqueue:
1791 destroy_workqueue(hdev->workqueue);
1792err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001793 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001794 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001795 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001796
David Herrmann33ca9542011-10-08 14:58:49 +02001797 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798}
1799EXPORT_SYMBOL(hci_register_dev);
1800
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove the device from the global list first so no new
	 * lookups can find it while it is being torn down. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets (kfree_skb(NULL) is a no-op) */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device finished setup
	 * and is not in the middle of initialization. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* adv_work may be queued; wait for it before destroying the workqueue */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device lists under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1853
1854/* Suspend HCI device */
1855int hci_suspend_dev(struct hci_dev *hdev)
1856{
1857 hci_notify(hdev, HCI_DEV_SUSPEND);
1858 return 0;
1859}
1860EXPORT_SYMBOL(hci_suspend_dev);
1861
1862/* Resume HCI device */
1863int hci_resume_dev(struct hci_dev *hdev)
1864{
1865 hci_notify(hdev, HCI_DEV_RESUME);
1866 return 0;
1867}
1868EXPORT_SYMBOL(hci_resume_dev);
1869
/* Receive frame from HCI drivers.
 * Takes ownership of @skb: frees it and returns -ENXIO when the device
 * is gone or neither up nor initializing; otherwise timestamps it and
 * hands it to the rx work for processing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Defer actual processing to the rx work on the device workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1892
/* Incrementally reassemble one HCI packet of @type from @count bytes at
 * @data, using the per-device reassembly slot @index to carry state
 * across calls. Returns the number of input bytes NOT consumed (a
 * complete frame was delivered when the return is > 0 or the packet
 * finished exactly at the end of the input), or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slots */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate an skb sized for the
		 * largest possible packet of this type and start by
		 * expecting just the header. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect = bytes still needed to finish the current
		 * stage (header first, then payload) */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current stage still needs */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, read the payload length
		 * from it and verify it fits in the allocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			/* Ownership of skb passed on; clear the slot and
			 * report how much input is left for the caller. */
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2001
Marcel Holtmannef222012007-07-11 06:42:04 +02002002int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2003{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302004 int rem = 0;
2005
Marcel Holtmannef222012007-07-11 06:42:04 +02002006 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2007 return -EILSEQ;
2008
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002009 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002010 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302011 if (rem < 0)
2012 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002013
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302014 data += (count - rem);
2015 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002016 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002017
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302018 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002019}
2020EXPORT_SYMBOL(hci_recv_fragment);
2021
/* Single reassembly slot used for untyped stream input (e.g. UART
 * transports where the packet-type indicator is in-band). */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream. The first byte of each
 * packet carries the packet type; subsequent bytes are fed to
 * hci_reassembly() until the packet completes. Returns the number of
 * unconsumed bytes (0 when everything was used) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Consume the in-band type byte */
			data++;
			count--;
		} else
			/* Continue the packet already in progress */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057/* ---- Interface to upper protocols ---- */
2058
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059int hci_register_cb(struct hci_cb *cb)
2060{
2061 BT_DBG("%p name %s", cb, cb->name);
2062
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002063 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002065 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
2067 return 0;
2068}
2069EXPORT_SYMBOL(hci_register_cb);
2070
2071int hci_unregister_cb(struct hci_cb *cb)
2072{
2073 BT_DBG("%p name %s", cb, cb->name);
2074
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002075 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002077 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
2079 return 0;
2080}
2081EXPORT_SYMBOL(hci_unregister_cb);
2082
/* Hand one outgoing frame to the transport driver.
 * Takes ownership of @skb (frees it on missing device). When the device
 * is in promiscuous mode the frame is also timestamped and mirrored to
 * monitoring sockets before transmission. Returns the driver's send()
 * result or -ENODEV.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror the frame to HCI sockets for sniffers */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2106
/* Send HCI command.
 * Builds a command packet (header + @plen parameter bytes copied from
 * @param) and queues it on cmd_q for the cmd work to transmit.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode plus parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142
2143/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002144void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145{
2146 struct hci_command_hdr *hdr;
2147
2148 if (!hdev->sent_cmd)
2149 return NULL;
2150
2151 hdr = (void *) hdev->sent_cmd->data;
2152
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002153 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 return NULL;
2155
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002156 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
2158 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2159}
2160
2161/* Send ACL data */
2162static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2163{
2164 struct hci_acl_hdr *hdr;
2165 int len = skb->len;
2166
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002167 skb_push(skb, HCI_ACL_HDR_SIZE);
2168 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002169 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002170 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2171 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172}
2173
/* Queue an ACL skb (and any fragments hanging off its frag_list) onto
 * @queue. The head fragment already carries its ACL header; continuation
 * fragments get one added here with ACL_CONT set. All fragments are
 * queued atomically so the scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each piece is queued as its
		 * own skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2214
2215void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216{
2217 struct hci_conn *conn = chan->conn;
2218 struct hci_dev *hdev = conn->hdev;
2219
2220 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2221
2222 skb->dev = (void *) hdev;
2223 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2224 hci_add_acl_hdr(skb, conn->handle, flags);
2225
2226 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002228 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229}
2230EXPORT_SYMBOL(hci_send_acl);
2231
/* Send SCO data.
 * Builds the SCO header on the stack, pushes it in front of the payload,
 * queues the skb on the connection's data queue and kicks the tx work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the header into the newly reserved headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2254
2255/* ---- HCI TX task (outgoing data) ---- */
2256
/* HCI Connection scheduler */
/* Pick the connection of link @type with queued data and the fewest
 * in-flight packets, and compute its fair-share transmit quota in
 * *quote (controller buffer count divided by the number of eligible
 * connections, minimum 1). Returns NULL with *quote = 0 if nothing is
 * ready to send.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Early exit once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0 means LE shares the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2316
/* Transmit-timeout handler for links of @type: disconnect (reason 0x13)
 * every connection of that type that still has unacknowledged packets
 * (c->sent != 0), since the controller has stopped returning buffer
 * completions for them.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2337
/* Channel-aware scheduler: scan all connections of link @type and their
 * channels, and pick the channel whose head packet has the highest
 * priority, breaking ties by the connection with the fewest in-flight
 * packets. *quote gets the fair-share transmit quota among channels at
 * that winning priority. Returns NULL if nothing is queued.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities prefer the connection
			 * with the fewest unacked packets */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* le_mtu == 0 means LE shares the ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2416
/* Anti-starvation pass run after a scheduling round: for every channel
 * of link @type that sent nothing this round (chan->sent == 0) but has
 * data queued, promote its head packet to HCI_PRIO_MAX - 1 so it wins
 * the next hci_chan_sent() selection. Channels that did send have their
 * per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to transmit; just reset its
			 * per-round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion level */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2466
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002467static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2468{
2469 /* Calculate count of blocks used by this packet */
2470 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2471}
2472
/* Detect a stalled ACL transmit path: if the controller reports no free
 * buffers (@cnt == 0) and nothing has been sent for HCI_ACL_TX_TIMEOUT,
 * tear down the stalled ACL links. Skipped entirely for raw devices.
 */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483
/* Packet-based ACL scheduler: repeatedly ask hci_chan_sent() for the
 * best channel and transmit up to its quota of packets while controller
 * buffers (acl_cnt) remain, then run the starvation pass if anything
 * was sent.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet when selected */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities of starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2521
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002522static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2523{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002524 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002525 struct hci_chan *chan;
2526 struct sk_buff *skb;
2527 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002528
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002529 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002530
2531 while (hdev->block_cnt > 0 &&
2532 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2533 u32 priority = (skb_peek(&chan->data_q))->priority;
2534 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2535 int blocks;
2536
2537 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2538 skb->len, skb->priority);
2539
2540 /* Stop if priority has changed */
2541 if (skb->priority < priority)
2542 break;
2543
2544 skb = skb_dequeue(&chan->data_q);
2545
2546 blocks = __get_blocks(hdev, skb);
2547 if (blocks > hdev->block_cnt)
2548 return;
2549
2550 hci_conn_enter_active_mode(chan->conn,
2551 bt_cb(skb)->force_active);
2552
2553 hci_send_frame(skb);
2554 hdev->acl_last_tx = jiffies;
2555
2556 hdev->block_cnt -= blocks;
2557 quote -= blocks;
2558
2559 chan->sent += blocks;
2560 chan->conn->sent += blocks;
2561 }
2562 }
2563
2564 if (cnt != hdev->block_cnt)
2565 hci_prio_recalculate(hdev, ACL_LINK);
2566}
2567
2568static inline void hci_sched_acl(struct hci_dev *hdev)
2569{
2570 BT_DBG("%s", hdev->name);
2571
2572 if (!hci_conn_num(hdev, ACL_LINK))
2573 return;
2574
2575 switch (hdev->flow_ctl_mode) {
2576 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2577 hci_sched_acl_pkt(hdev);
2578 break;
2579
2580 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2581 hci_sched_acl_blk(hdev);
2582 break;
2583 }
2584}
2585
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586/* Schedule SCO */
2587static inline void hci_sched_sco(struct hci_dev *hdev)
2588{
2589 struct hci_conn *conn;
2590 struct sk_buff *skb;
2591 int quote;
2592
2593 BT_DBG("%s", hdev->name);
2594
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002595 if (!hci_conn_num(hdev, SCO_LINK))
2596 return;
2597
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2599 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2600 BT_DBG("skb %p len %d", skb, skb->len);
2601 hci_send_frame(skb);
2602
2603 conn->sent++;
2604 if (conn->sent == ~0)
2605 conn->sent = 0;
2606 }
2607 }
2608}
2609
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002610static inline void hci_sched_esco(struct hci_dev *hdev)
2611{
2612 struct hci_conn *conn;
2613 struct sk_buff *skb;
2614 int quote;
2615
2616 BT_DBG("%s", hdev->name);
2617
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002618 if (!hci_conn_num(hdev, ESCO_LINK))
2619 return;
2620
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002621 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2622 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2623 BT_DBG("skb %p len %d", skb, skb->len);
2624 hci_send_frame(skb);
2625
2626 conn->sent++;
2627 if (conn->sent == ~0)
2628 conn->sent = 0;
2629 }
2630 }
2631}
2632
/* Schedule transmission of queued LE data.
 *
 * LE traffic either has its own credit pool (le_cnt, when the
 * controller reports dedicated LE buffers via le_pkts) or shares the
 * ACL pool (acl_cnt).  Channels are drained in priority order; a drop
 * in skb priority ends the round for that channel.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	/* Nothing to do without at least one LE link. */
	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool when there are no dedicated LE
	 * buffers (le_pkts == 0). */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Rebalance priorities only if something was sent. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2683
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002684static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002686 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 struct sk_buff *skb;
2688
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002689 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2690 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
2692 /* Schedule queues and send stuff to HCI driver */
2693
2694 hci_sched_acl(hdev);
2695
2696 hci_sched_sco(hdev);
2697
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002698 hci_sched_esco(hdev);
2699
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002700 hci_sched_le(hdev);
2701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 /* Send next queued raw (unknown type) packet */
2703 while ((skb = skb_dequeue(&hdev->raw_q)))
2704 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705}
2706
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002707/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
2709/* ACL data packet */
2710static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2711{
2712 struct hci_acl_hdr *hdr = (void *) skb->data;
2713 struct hci_conn *conn;
2714 __u16 handle, flags;
2715
2716 skb_pull(skb, HCI_ACL_HDR_SIZE);
2717
2718 handle = __le16_to_cpu(hdr->handle);
2719 flags = hci_flags(handle);
2720 handle = hci_handle(handle);
2721
2722 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2723
2724 hdev->stat.acl_rx++;
2725
2726 hci_dev_lock(hdev);
2727 conn = hci_conn_hash_lookup_handle(hdev, handle);
2728 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002729
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002731 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002732
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002734 l2cap_recv_acldata(conn, skb, flags);
2735 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002737 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 hdev->name, handle);
2739 }
2740
2741 kfree_skb(skb);
2742}
2743
2744/* SCO data packet */
2745static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2746{
2747 struct hci_sco_hdr *hdr = (void *) skb->data;
2748 struct hci_conn *conn;
2749 __u16 handle;
2750
2751 skb_pull(skb, HCI_SCO_HDR_SIZE);
2752
2753 handle = __le16_to_cpu(hdr->handle);
2754
2755 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2756
2757 hdev->stat.sco_rx++;
2758
2759 hci_dev_lock(hdev);
2760 conn = hci_conn_hash_lookup_handle(hdev, handle);
2761 hci_dev_unlock(hdev);
2762
2763 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002765 sco_recv_scodata(conn, skb);
2766 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002768 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 hdev->name, handle);
2770 }
2771
2772 kfree_skb(skb);
2773}
2774
/* RX work handler: drain hdev->rx_q and dispatch each packet.
 *
 * Per packet: a copy goes to HCI sockets when the device is in
 * promiscuous mode; raw-mode devices get no in-kernel processing;
 * while HCI_INIT is set, data packets are dropped; otherwise the
 * packet is routed by type to the event/ACL/SCO handlers, which
 * consume the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* No in-kernel processing for raw-mode devices. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop. */
			kfree_skb(skb);
			break;
		}
	}
}
2826
/* Command work handler: send the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).
 *
 * A clone of the outgoing command is kept in hdev->sent_cmd for later
 * reference.  If cloning fails, the command is put back at the head of
 * the queue and the work is rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command watchdog while a reset is pending;
			 * otherwise (re)arm the command timeout timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002857
2858int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2859{
2860 /* General inquiry access code (GIAC) */
2861 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2862 struct hci_cp_inquiry cp;
2863
2864 BT_DBG("%s", hdev->name);
2865
2866 if (test_bit(HCI_INQUIRY, &hdev->flags))
2867 return -EINPROGRESS;
2868
Johan Hedberg46632622012-01-02 16:06:08 +02002869 inquiry_cache_flush(hdev);
2870
Andre Guedes2519a1f2011-11-07 11:45:24 -03002871 memset(&cp, 0, sizeof(cp));
2872 memcpy(&cp.lap, lap, sizeof(cp.lap));
2873 cp.length = length;
2874
2875 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2876}
Andre Guedes023d5042011-11-04 14:16:52 -03002877
2878int hci_cancel_inquiry(struct hci_dev *hdev)
2879{
2880 BT_DBG("%s", hdev->name);
2881
2882 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2883 return -EPERM;
2884
2885 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2886}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002887
/* enable_hs (declared elsewhere in this file) is exposed as a module
 * parameter; mode 0644 makes it writable by root at runtime. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");