/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */
/* Subscribe @nb to HCI device events (HCI_DEV_UP/DOWN/REG/UNREG).
 * Returns 0 on success, as reported by the atomic notifier chain.
 */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the HCI event notifier chain.
 * Returns 0 on success, -ENOENT if the block was not registered.
 */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (with @hdev as payload) to all registered HCI
 * notifier blocks. Safe from atomic context (atomic notifier chain).
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Called from event processing when command @cmd finished with @result.
 * Completes a pending synchronous request and wakes the waiter that is
 * sleeping in __hci_request().
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Store the result and wake the waiter only if a request is
	 * actually pending; spurious completions are ignored. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Abort a pending synchronous request with error @err (positive errno).
 * The waiter in __hci_request() will observe HCI_REQ_CANCELED and
 * return -@err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and sleeps up to
 * @timeout jiffies until hci_req_complete()/hci_req_cancel() wakes us.
 * Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno mapped from the HCI status on
 * completion, -EINTR if interrupted by a signal, or -ETIMEDOUT.
 *
 * NOTE(review): the waiter must be on the wait queue and in
 * TASK_INTERRUPTIBLE *before* req() runs, so a completion that arrives
 * immediately is not lost.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result is an HCI status byte; map it to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Returns -ENETDOWN if the device is not up, otherwise the result of
 * __hci_request().
 */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: issue HCI_Reset. HCI_RESET in hdev->flags marks the
 * reset as in progress; it is cleared when the command completes.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the init-phase HCI command sequence for a BR/EDR controller.
 * Commands are sent asynchronously; their completions drive the init
 * state machine via hci_req_complete().
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers do packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot tolerate it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all link keys stored on the controller; the host keeps
	 * them instead. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the (much shorter) init sequence for an AMP controller, which
 * uses block-based flow control and needs no BR/EDR-specific setup.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback run during hci_dev_open(): flush any driver-supplied
 * "special" command skbs to the command queue first, then dispatch the
 * controller-type-specific init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE-capable controllers: query the LE ACL buffer
 * size so LE traffic gets its own flow-control accounting.
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
Andre Guedes6fbe1952012-02-03 17:47:58 -0300363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200367 return true;
368
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 default:
370 return false;
371 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200372}
373
/* Transition the discovery state machine to @state and emit the
 * corresponding mgmt "discovering" event on the edges that user space
 * cares about (stopped, and entering inquiry/LE scan).
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op transitions generate no mgmt events */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
399
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400static void inquiry_cache_flush(struct hci_dev *hdev)
401{
Johan Hedberg30883512012-01-04 14:16:21 +0200402 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200403 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404
Johan Hedberg561aafb2012-01-04 13:31:59 +0200405 list_for_each_entry_safe(p, n, &cache->all, all) {
406 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409
410 INIT_LIST_HEAD(&cache->unknown);
411 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200412 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413}
414
415struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
416{
Johan Hedberg30883512012-01-04 14:16:21 +0200417 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
Johan Hedberg561aafb2012-01-04 13:31:59 +0200422 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200424 return e;
425 }
426
427 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428}
429
Johan Hedberg561aafb2012-01-04 13:31:59 +0200430struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
431 bdaddr_t *bdaddr)
432{
Johan Hedberg30883512012-01-04 14:16:21 +0200433 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200434 struct inquiry_entry *e;
435
436 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
437
438 list_for_each_entry(e, &cache->unknown, list) {
439 if (!bacmp(&e->data.bdaddr, bdaddr))
440 return e;
441 }
442
443 return NULL;
444}
445
/* Search the name-resolve list. With @bdaddr == BDADDR_ANY this returns
 * the first entry whose name_state equals @state (used to pick the next
 * name to resolve); otherwise it matches on the exact address.
 * Returns NULL if nothing matches. Caller must hold the hdev lock.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard lookup: match by name state instead of address */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
464
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (strongest RSSI first), so stronger devices get their names
 * resolved earlier. Entries already in NAME_PENDING state stay at the
 * front. Caller must hold the hdev lock.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we find the first non-pending entry that is weaker
	 * than ie; insert after the last stronger one. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
483
/* Insert or refresh an inquiry result in the cache.
 *
 * @name_known: whether the remote name is already known for this result.
 * Returns true if no name resolution is needed for the entry, false if
 * it ended up on the "unknown" list (or allocation failed).
 * Caller must hold the hdev lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* RSSI changed for an entry queued for resolving: re-sort
		 * the resolve list so ordering stays strongest-first. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: drop the entry from unknown/resolve lists
	 * (but never disturb an in-flight NAME_PENDING resolution). */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
533
534static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
535{
Johan Hedberg30883512012-01-04 14:16:21 +0200536 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 struct inquiry_info *info = (struct inquiry_info *) buf;
538 struct inquiry_entry *e;
539 int copied = 0;
540
Johan Hedberg561aafb2012-01-04 13:31:59 +0200541 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200543
544 if (copied >= num)
545 break;
546
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 bacpy(&info->bdaddr, &data->bdaddr);
548 info->pscan_rep_mode = data->pscan_rep_mode;
549 info->pscan_period_mode = data->pscan_period_mode;
550 info->pscan_mode = data->pscan_mode;
551 memcpy(info->dev_class, data->dev_class, 3);
552 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200553
Linus Torvalds1da177e2005-04-16 15:20:36 -0700554 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200555 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 }
557
558 BT_DBG("cache %p, copied %d", cache, copied);
559 return copied;
560}
561
/* Request callback for hci_inquiry(): start an inquiry with the LAP,
 * length and response limit supplied by user space in @opt (a pointer
 * to struct hci_inquiry_req). Skipped if an inquiry is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
578
/* ioctl(HCIINQUIRY) handler: optionally run a fresh inquiry, then copy
 * the cached results back to user space after the request structure.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Run a new inquiry only if the cache is stale, empty, or the
	 * caller explicitly requested a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per the HCI spec; ~2000ms each */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* First the updated request (with the real num_rsp), then the
	 * inquiry_info array immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
644
645/* ---- HCI ioctl helpers ---- */
646
/* ioctl helper: bring device @dev up.
 *
 * Opens the transport, runs the HCI init sequence (unless the device is
 * raw), and on success marks the device HCI_UP and notifies listeners.
 * On init failure every queue/work item is drained and the transport is
 * closed again. Returns 0 or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-request error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE controllers need an extra init round */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt announces power state itself */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
/* Tear a device down: cancel pending work and timers, flush all
 * queues, optionally reset the controller, and close the transport.
 * The ordering matters: works/timers first, then caches and queues,
 * then the reset, then the final queue drain and close.
 * Returns 0 (also when the device was already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request still sleeping in __hci_request() */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Controllers with the NO_RESET quirk were not reset at open
	 * time, so reset them on close instead. */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
818
819int hci_dev_close(__u16 dev)
820{
821 struct hci_dev *hdev;
822 int err;
823
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200824 hdev = hci_dev_get(dev);
825 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826 return -ENODEV;
827 err = hci_dev_do_close(hdev);
828 hci_dev_put(hdev);
829 return err;
830}
831
/* Reset the HCI device with the given id: drop pending traffic,
 * flush caches and connections, and (unless in raw mode) issue an
 * HCI Reset command to the controller.
 *
 * Returns 0 on success (including when the device is simply down),
 * -ENODEV for an unknown id, or the error from __hci_request().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request issuers for this device */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and zero per-link-type packet counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* In raw mode userspace owns the controller; skip the HCI Reset */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
870
871int hci_dev_reset_stat(__u16 dev)
872{
873 struct hci_dev *hdev;
874 int ret = 0;
875
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200876 hdev = hci_dev_get(dev);
877 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878 return -ENODEV;
879
880 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
881
882 hci_dev_put(hdev);
883
884 return ret;
885}
886
/* Handle the HCISET* ioctls that tweak a single device's settings.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown device id, -EOPNOTSUPP / -EINVAL for unsupported requests,
 * or the error from hci_request().
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values: the
	 * packet count in the first half-word and the MTU in the
	 * second (host memory order — part of the legacy ioctl ABI). */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
961
/* HCIGETDEVLIST ioctl: copy (id, flags) pairs for up to dev_num
 * registered devices back to userspace.
 *
 * @arg points at a struct hci_dev_list_req whose dev_num field caps
 * the number of entries to return.
 *
 * Returns 0 on success, -EFAULT on bad user memory, -EINVAL for a
 * zero or oversized dev_num, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to a sane size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as "userspace is interested":
		 * abort any pending auto-power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1008
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on bad user memory, -ENODEV for an
 * unknown device id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as interest from userspace:
	 * abort any pending auto-power-off (synchronously here,
	 * since we are not under the dev-list lock) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1050
1051/* ---- Interface to HCI drivers ---- */
1052
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001053static int hci_rfkill_set_block(void *data, bool blocked)
1054{
1055 struct hci_dev *hdev = data;
1056
1057 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1058
1059 if (!blocked)
1060 return 0;
1061
1062 hci_dev_do_close(hdev);
1063
1064 return 0;
1065}
1066
/* rfkill callbacks: only blocking is acted upon (see
 * hci_rfkill_set_block); unblocking is a no-op. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1070
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071/* Alloc HCI device */
1072struct hci_dev *hci_alloc_dev(void)
1073{
1074 struct hci_dev *hdev;
1075
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001076 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001077 if (!hdev)
1078 return NULL;
1079
David Herrmann0ac7e702011-10-08 14:58:47 +02001080 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081 skb_queue_head_init(&hdev->driver_init);
1082
1083 return hdev;
1084}
1085EXPORT_SYMBOL(hci_alloc_dev);
1086
/* Release a device obtained from hci_alloc_dev().
 *
 * The hci_dev itself is not freed here: dropping the embedded
 * struct device reference lets its release callback do the final
 * kfree once the last user is gone. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1096
/* Deferred power-on work: open the device and, depending on flags,
 * arm the auto-off timer or announce the controller to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are switched back off after a grace
	 * period unless userspace claims them in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: tell mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1113
/* Deferred auto-power-off work: clear the auto-off marker and close
 * the device (scheduled from hci_power_on()). */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1125
/* Deferred discoverable-timeout work: turn inquiry scan back off
 * (leaving page scan on) once the discoverable period expires. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;	/* page scan only: connectable, not discoverable */

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1143
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001144int hci_uuids_clear(struct hci_dev *hdev)
1145{
1146 struct list_head *p, *n;
1147
1148 list_for_each_safe(p, n, &hdev->uuids) {
1149 struct bt_uuid *uuid;
1150
1151 uuid = list_entry(p, struct bt_uuid, list);
1152
1153 list_del(p);
1154 kfree(uuid);
1155 }
1156
1157 return 0;
1158}
1159
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001160int hci_link_keys_clear(struct hci_dev *hdev)
1161{
1162 struct list_head *p, *n;
1163
1164 list_for_each_safe(p, n, &hdev->link_keys) {
1165 struct link_key *key;
1166
1167 key = list_entry(p, struct link_key, list);
1168
1169 list_del(p);
1170 kfree(key);
1171 }
1172
1173 return 0;
1174}
1175
/* Free every stored SMP long term key for @hdev.
 *
 * Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1187
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001188struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1189{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001190 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001192 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001193 if (bacmp(bdaddr, &k->bdaddr) == 0)
1194 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001195
1196 return NULL;
1197}
1198
/* Decide whether a newly created link key should be stored
 * persistently (returns 1) or used for this connection only and then
 * discarded (returns 0). Called from hci_add_link_key(). */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key (types below 0x03) */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one
	 * (0xff acts as the "no previous key type" sentinel) */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case: key created without a connection */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001239 list_for_each_entry(k, &hdev->long_term_keys, list) {
1240 if (k->ediv != ediv ||
1241 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001242 continue;
1243
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001244 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001245 }
1246
1247 return NULL;
1248}
1249EXPORT_SYMBOL(hci_find_ltk);
1250
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001251struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1252 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001253{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001256 list_for_each_entry(k, &hdev->long_term_keys, list)
1257 if (addr_type == k->bdaddr_type &&
1258 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001259 return k;
1260
1261 return NULL;
1262}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001263EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001264
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:    connection the key was created on, or NULL (security
 *           mode 3 style key generation)
 * @new_key: non-zero if the controller reported this as a new key,
 *           in which case mgmt is notified and non-persistent keys
 *           are dropped again after notification
 * @val:     the 16-byte key value
 * @type:    HCI link key type
 * @pin_len: PIN length used during pairing
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff = "no previous key type" sentinel */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys only existed to be reported to mgmt */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1319
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * @type:          HCI_SMP_STK or HCI_SMP_LTK (anything else is ignored)
 * @new_key:       non-zero if mgmt should be notified of a new LTK
 * @authenticated: whether the key was created with MITM protection
 * @tk:            the 16-byte key value
 * @enc_size:      negotiated encryption key size
 * @ediv, @rand:   key identification pair
 *
 * Returns 0 on success (including the silently-ignored type case) or
 * -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only STKs and LTKs are stored here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are announced to mgmt, not short term ones */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1356
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001357int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1358{
1359 struct link_key *key;
1360
1361 key = hci_find_link_key(hdev, bdaddr);
1362 if (!key)
1363 return -ENOENT;
1364
1365 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1366
1367 list_del(&key->list);
1368 kfree(key);
1369
1370 return 0;
1371}
1372
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001373int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1374{
1375 struct smp_ltk *k, *tmp;
1376
1377 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1378 if (bacmp(bdaddr, &k->bdaddr))
1379 continue;
1380
1381 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1382
1383 list_del(&k->list);
1384 kfree(k);
1385 }
1386
1387 return 0;
1388}
1389
/* HCI command timer function: fires when the controller fails to
 * answer a sent command in time. Restores the single command credit
 * and kicks the command work so queued commands are not stuck. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1399
Szymon Janc2763eda2011-03-22 13:12:22 +01001400struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1401 bdaddr_t *bdaddr)
1402{
1403 struct oob_data *data;
1404
1405 list_for_each_entry(data, &hdev->remote_oob_data, list)
1406 if (bacmp(bdaddr, &data->bdaddr) == 0)
1407 return data;
1408
1409 return NULL;
1410}
1411
1412int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1413{
1414 struct oob_data *data;
1415
1416 data = hci_find_remote_oob_data(hdev, bdaddr);
1417 if (!data)
1418 return -ENOENT;
1419
1420 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1421
1422 list_del(&data->list);
1423 kfree(data);
1424
1425 return 0;
1426}
1427
/* Free every cached remote out-of-band pairing entry for @hdev.
 *
 * Always returns 0. */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1439
1440int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1441 u8 *randomizer)
1442{
1443 struct oob_data *data;
1444
1445 data = hci_find_remote_oob_data(hdev, bdaddr);
1446
1447 if (!data) {
1448 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1449 if (!data)
1450 return -ENOMEM;
1451
1452 bacpy(&data->bdaddr, bdaddr);
1453 list_add(&data->list, &hdev->remote_oob_data);
1454 }
1455
1456 memcpy(data->hash, hash, sizeof(data->hash));
1457 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1458
1459 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1460
1461 return 0;
1462}
1463
Antti Julkub2a66aa2011-06-15 12:01:14 +03001464struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1465 bdaddr_t *bdaddr)
1466{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001467 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001468
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001469 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001470 if (bacmp(bdaddr, &b->bdaddr) == 0)
1471 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001472
1473 return NULL;
1474}
1475
1476int hci_blacklist_clear(struct hci_dev *hdev)
1477{
1478 struct list_head *p, *n;
1479
1480 list_for_each_safe(p, n, &hdev->blacklist) {
1481 struct bdaddr_list *b;
1482
1483 b = list_entry(p, struct bdaddr_list, list);
1484
1485 list_del(p);
1486 kfree(b);
1487 }
1488
1489 return 0;
1490}
1491
/* Add @bdaddr to the device's blacklist and notify mgmt.
 *
 * @type: address type, forwarded to the mgmt notification.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already blocked,
 * -ENOMEM on allocation failure, otherwise the result of
 * mgmt_device_blocked().
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blocked */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1512
/* Remove @bdaddr from the device's blacklist and notify mgmt.
 *
 * Passing BDADDR_ANY clears the whole blacklist instead.
 *
 * Returns -ENOENT if the address was not blocked, otherwise the
 * result of mgmt_device_unblocked() (or hci_blacklist_clear() in the
 * wildcard case).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard: flush the entire list */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1529
/* Deferred work: empty the LE advertising cache under the device
 * lock (scheduled via hdev->adv_work). */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1541
/* Free every cached LE advertising entry for @hdev. Callers are
 * expected to hold the device lock (see hci_clear_adv_cache).
 *
 * Always returns 0. */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1555
1556struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1557{
1558 struct adv_entry *entry;
1559
1560 list_for_each_entry(entry, &hdev->adv_entries, list)
1561 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1562 return entry;
1563
1564 return NULL;
1565}
1566
1567static inline int is_connectable_adv(u8 evt_type)
1568{
1569 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1570 return 1;
1571
1572 return 0;
1573}
1574
/* Cache the sender of a connectable LE advertising report.
 *
 * @ev: the advertising report carrying the remote address and type.
 *
 * Returns 0 on success or if the address is already cached, -EINVAL
 * for non-connectable advertising types, -ENOMEM on allocation
 * failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1602
/* Request callback: send LE Set Scan Parameters using the
 * le_scan_params smuggled through @opt by hci_do_le_scan(). */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1615
/* Request callback: send LE Set Scan Enable with enable=1
 * (filter_dup stays 0 from the memset). @opt is unused. */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1625
/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and schedule the automatic disable after @timeout ms.
 *
 * @type/@interval/@window: values for LE Set Scan Parameters.
 *
 * Returns -EINPROGRESS if a scan is already running, a negative error
 * from __hci_request(), or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-command wait limit */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on our stack; __hci_request completes before return */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
						timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the automatic scan stop */
	schedule_delayed_work(&hdev->le_scan_disable,
						msecs_to_jiffies(timeout));

	return 0;
}
1659
/* Deferred work (armed by hci_do_le_scan): stop the running LE scan
 * by sending LE Set Scan Enable with an all-zero parameter block,
 * i.e. enable=0. */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	/* zeroed cp means enable = 0: disable scanning */
	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1672
/* Work item behind hci_le_scan(): runs the blocking scan start with
 * the parameters stashed in hdev->le_scan_params. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
			param->window, param->timeout);
}
1683
/* Kick off an LE scan asynchronously: record the parameters on the
 * device and queue le_scan_work on the long-running system workqueue
 * (the scan start issues blocking HCI requests).
 *
 * Returns -EINPROGRESS if the scan work is already pending/running,
 * 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
				int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1703
/* Register HCI device.
 *
 * Allocates the lowest free device id (AMP controllers never get id 0,
 * see below), initializes all per-device state, creates the per-device
 * workqueue and sysfs/rfkill entries, and schedules the initial power-on.
 * Returns the assigned id on success or a negative errno; on failure the
 * device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* The driver must supply at least open() and close() callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id (list is kept sorted by id;
	 * `head` tracks the node after which the new entry is linked). */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state and packet/link parameters. */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/CMD/TX processing is deferred to work items. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands that never get a response. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority workqueue for this device's
	 * RX/TX/command processing. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves rfkill NULL
	 * and registration of the device still succeeds. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Device starts in setup/auto-off state; power_on work completes
	 * the bring-up. */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above under the same lock. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1835
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, shuts it down, frees
 * reassembly buffers, tears down mgmt/sysfs/rfkill state and drops the
 * reference taken at registration time (which may free the device).
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device finished setup and
	 * is not in the middle of initialization. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all remaining per-device data under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Pairs with hci_dev_hold() in hci_register_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1888
/* Suspend HCI device: notify registered listeners; always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1896
/* Resume HCI device: notify registered listeners; always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1904
Marcel Holtmann76bca882009-11-18 00:40:39 +01001905/* Receive frame from HCI drivers */
1906int hci_recv_frame(struct sk_buff *skb)
1907{
1908 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1909 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1910 && !test_bit(HCI_INIT, &hdev->flags))) {
1911 kfree_skb(skb);
1912 return -ENXIO;
1913 }
1914
1915 /* Incomming skb */
1916 bt_cb(skb)->incoming = 1;
1917
1918 /* Time stamp */
1919 __net_timestamp(skb);
1920
Marcel Holtmann76bca882009-11-18 00:40:39 +01001921 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001922 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001923
Marcel Holtmann76bca882009-11-18 00:40:39 +01001924 return 0;
1925}
1926EXPORT_SYMBOL(hci_recv_frame);
1927
/* Incrementally reassemble one HCI packet of the given type from a byte
 * stream, using hdev->reassembly[index] as the in-progress buffer.
 *
 * On first call for a packet, a buffer sized for the maximum frame of
 * that type is allocated and scb->expect is set to the header length.
 * Once the header is complete, expect is re-set to the payload length
 * taken from the header. A fully assembled packet is handed off to
 * hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ for a bad type/index, -ENOMEM on allocation failure or
 * when the advertised payload would not fit the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the buffer for the largest
		 * possible frame of this type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy as much as is both available and still expected. */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just completed, learn the payload length
		 * from it and verify it fits the preallocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand off and clear the slot
			 * (ownership of skb passes to hci_recv_frame). */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2036
Marcel Holtmannef222012007-07-11 06:42:04 +02002037int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2038{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302039 int rem = 0;
2040
Marcel Holtmannef222012007-07-11 06:42:04 +02002041 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2042 return -EILSEQ;
2043
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002044 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002045 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302046 if (rem < 0)
2047 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002048
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302049 data += (count - rem);
2050 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002051 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002052
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302053 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002054}
2055EXPORT_SYMBOL(hci_recv_fragment);
2056
Suraj Sumangala99811512010-07-14 13:02:19 +05302057#define STREAM_REASSEMBLY 0
2058
2059int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2060{
2061 int type;
2062 int rem = 0;
2063
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002064 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302065 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2066
2067 if (!skb) {
2068 struct { char type; } *pkt;
2069
2070 /* Start of the frame */
2071 pkt = data;
2072 type = pkt->type;
2073
2074 data++;
2075 count--;
2076 } else
2077 type = bt_cb(skb)->pkt_type;
2078
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002079 rem = hci_reassembly(hdev, type, data, count,
2080 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302081 if (rem < 0)
2082 return rem;
2083
2084 data += (count - rem);
2085 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002086 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302087
2088 return rem;
2089}
2090EXPORT_SYMBOL(hci_recv_stream_fragment);
2091
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092/* ---- Interface to upper protocols ---- */
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094int hci_register_cb(struct hci_cb *cb)
2095{
2096 BT_DBG("%p name %s", cb, cb->name);
2097
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002098 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002100 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102 return 0;
2103}
2104EXPORT_SYMBOL(hci_register_cb);
2105
2106int hci_unregister_cb(struct hci_cb *cb)
2107{
2108 BT_DBG("%p name %s", cb, cb->name);
2109
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002110 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002112 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 return 0;
2115}
2116EXPORT_SYMBOL(hci_unregister_cb);
2117
2118static int hci_send_frame(struct sk_buff *skb)
2119{
2120 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2121
2122 if (!hdev) {
2123 kfree_skb(skb);
2124 return -ENODEV;
2125 }
2126
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002127 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 if (atomic_read(&hdev->promisc)) {
2130 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002131 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002133 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 }
2135
2136 /* Get rid of skb owner, prior to sending to the driver. */
2137 skb_orphan(skb);
2138
2139 return hdev->send(skb);
2140}
2141
/* Send HCI command.
 *
 * Builds an skb containing the command header (opcode, parameter length)
 * followed by plen bytes copied from param, then queues it on cmd_q for
 * the command work item to transmit. Returns 0 or -ENOMEM. Uses
 * GFP_ATOMIC, so it is safe from non-sleeping contexts.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Opcode goes on the wire little-endian. */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can track responses. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177
2178/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002179void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180{
2181 struct hci_command_hdr *hdr;
2182
2183 if (!hdev->sent_cmd)
2184 return NULL;
2185
2186 hdr = (void *) hdev->sent_cmd->data;
2187
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002188 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 return NULL;
2190
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002191 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
2193 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2194}
2195
2196/* Send ACL data */
2197static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2198{
2199 struct hci_acl_hdr *hdr;
2200 int len = skb->len;
2201
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002202 skb_push(skb, HCI_ACL_HDR_SIZE);
2203 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002204 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002205 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2206 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
2208
/* Queue an ACL packet (possibly pre-fragmented via skb frag_list) onto a
 * channel's data queue. The first fragment already carries its ACL
 * header (added by the caller); subsequent fragments get ACL_CONT
 * headers here. All fragments are queued atomically under the queue's
 * spinlock so the scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2249
/* Send ACL data on a channel: stamp the skb with its device and packet
 * type, prepend the ACL header for the first fragment, queue it (and any
 * fragments) on the channel's data queue, then kick the TX work item.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2265EXPORT_SYMBOL(hci_send_acl);
2266
2267/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002268void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269{
2270 struct hci_dev *hdev = conn->hdev;
2271 struct hci_sco_hdr hdr;
2272
2273 BT_DBG("%s len %d", hdev->name, skb->len);
2274
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002275 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 hdr.dlen = skb->len;
2277
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002278 skb_push(skb, HCI_SCO_HDR_SIZE);
2279 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002280 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281
2282 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002283 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002284
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002286 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287}
2288EXPORT_SYMBOL(hci_send_sco);
2289
2290/* ---- HCI TX task (outgoing data) ---- */
2291
/* HCI Connection scheduler */
/* Pick the connection of the given link type with pending data and the
 * fewest in-flight packets (simple fairness), and compute its TX quota:
 * the controller's free buffer count for that link type divided evenly
 * among competing connections, minimum 1. *quote is set to 0 when no
 * eligible connection exists.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection. */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget for this link type; LE falls
		 * back to the ACL budget when no LE buffers exist. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2351
/* Link TX timeout handler: disconnect every connection of the given type
 * that still has unacknowledged packets (c->sent), assuming the link has
 * stalled. Disconnect reason 0x13 is "Remote User Terminated Connection".
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2372
/* Channel-level scheduler: among all channels on connections of the
 * given link type, find the one whose head-of-queue skb has the highest
 * priority, breaking ties toward the connection with the fewest packets
 * in flight. Computes a TX quota like hci_low_sent(): free controller
 * buffers divided by the number of channels competing at that priority,
 * minimum 1. Returns NULL when nothing is sendable.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-busy connection's channel. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type are seen. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for this link type; LE falls back to
	 * the ACL budget when no dedicated LE buffers exist. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2451
/* Anti-starvation pass, run after a TX round: for every channel of the
 * given link type that sent nothing this round (chan->sent == 0) but has
 * queued data, promote its head skb to just below the maximum priority
 * so it will win the next hci_chan_sent() selection. Channels that did
 * send have their per-round counter reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: clear counter. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of this type are seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2501
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002502static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2503{
2504 /* Calculate count of blocks used by this packet */
2505 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2506}
2507
/* If the controller reports no free buffers (cnt == 0) and nothing has
 * been transmitted for longer than HCI_ACL_TX_TIMEOUT, treat the ACL
 * link as stalled and tear down its connections. Skipped entirely in
 * raw mode, where the stack does not manage flow control.
 */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002519static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2520{
2521 unsigned int cnt = hdev->acl_cnt;
2522 struct hci_chan *chan;
2523 struct sk_buff *skb;
2524 int quote;
2525
2526 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002527
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002528 while (hdev->acl_cnt &&
2529 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002530 u32 priority = (skb_peek(&chan->data_q))->priority;
2531 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002532 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2533 skb->len, skb->priority);
2534
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002535 /* Stop if priority has changed */
2536 if (skb->priority < priority)
2537 break;
2538
2539 skb = skb_dequeue(&chan->data_q);
2540
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002541 hci_conn_enter_active_mode(chan->conn,
2542 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002543
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544 hci_send_frame(skb);
2545 hdev->acl_last_tx = jiffies;
2546
2547 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002548 chan->sent++;
2549 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 }
2551 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002552
2553 if (cnt != hdev->acl_cnt)
2554 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555}
2556
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002557static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2558{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002559 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002560 struct hci_chan *chan;
2561 struct sk_buff *skb;
2562 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002563
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002564 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002565
2566 while (hdev->block_cnt > 0 &&
2567 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2568 u32 priority = (skb_peek(&chan->data_q))->priority;
2569 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2570 int blocks;
2571
2572 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2573 skb->len, skb->priority);
2574
2575 /* Stop if priority has changed */
2576 if (skb->priority < priority)
2577 break;
2578
2579 skb = skb_dequeue(&chan->data_q);
2580
2581 blocks = __get_blocks(hdev, skb);
2582 if (blocks > hdev->block_cnt)
2583 return;
2584
2585 hci_conn_enter_active_mode(chan->conn,
2586 bt_cb(skb)->force_active);
2587
2588 hci_send_frame(skb);
2589 hdev->acl_last_tx = jiffies;
2590
2591 hdev->block_cnt -= blocks;
2592 quote -= blocks;
2593
2594 chan->sent += blocks;
2595 chan->conn->sent += blocks;
2596 }
2597 }
2598
2599 if (cnt != hdev->block_cnt)
2600 hci_prio_recalculate(hdev, ACL_LINK);
2601}
2602
2603static inline void hci_sched_acl(struct hci_dev *hdev)
2604{
2605 BT_DBG("%s", hdev->name);
2606
2607 if (!hci_conn_num(hdev, ACL_LINK))
2608 return;
2609
2610 switch (hdev->flow_ctl_mode) {
2611 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2612 hci_sched_acl_pkt(hdev);
2613 break;
2614
2615 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2616 hci_sched_acl_blk(hdev);
2617 break;
2618 }
2619}
2620
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621/* Schedule SCO */
2622static inline void hci_sched_sco(struct hci_dev *hdev)
2623{
2624 struct hci_conn *conn;
2625 struct sk_buff *skb;
2626 int quote;
2627
2628 BT_DBG("%s", hdev->name);
2629
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002630 if (!hci_conn_num(hdev, SCO_LINK))
2631 return;
2632
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2634 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2635 BT_DBG("skb %p len %d", skb, skb->len);
2636 hci_send_frame(skb);
2637
2638 conn->sent++;
2639 if (conn->sent == ~0)
2640 conn->sent = 0;
2641 }
2642 }
2643}
2644
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002645static inline void hci_sched_esco(struct hci_dev *hdev)
2646{
2647 struct hci_conn *conn;
2648 struct sk_buff *skb;
2649 int quote;
2650
2651 BT_DBG("%s", hdev->name);
2652
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002653 if (!hci_conn_num(hdev, ESCO_LINK))
2654 return;
2655
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002656 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2657 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2658 BT_DBG("skb %p len %d", skb, skb->len);
2659 hci_send_frame(skb);
2660
2661 conn->sent++;
2662 if (conn->sent == ~0)
2663 conn->sent = 0;
2664 }
2665 }
2666}
2667
/* LE scheduler: drains LE channel queues using either the dedicated LE
 * credit pool or, for controllers without one, the shared ACL pool.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* le_pkts == 0 means no dedicated LE buffers: borrow ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only drain packets at (or above) the head's priority */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the leftover credits to whichever pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Rebalance channel priorities if anything was sent */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2718
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002719static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002720{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002721 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 struct sk_buff *skb;
2723
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002724 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2725 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726
2727 /* Schedule queues and send stuff to HCI driver */
2728
2729 hci_sched_acl(hdev);
2730
2731 hci_sched_sco(hdev);
2732
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002733 hci_sched_esco(hdev);
2734
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002735 hci_sched_le(hdev);
2736
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 /* Send next queued raw (unknown type) packet */
2738 while ((skb = skb_dequeue(&hdev->raw_q)))
2739 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740}
2741
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002742/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743
2744/* ACL data packet */
2745static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2746{
2747 struct hci_acl_hdr *hdr = (void *) skb->data;
2748 struct hci_conn *conn;
2749 __u16 handle, flags;
2750
2751 skb_pull(skb, HCI_ACL_HDR_SIZE);
2752
2753 handle = __le16_to_cpu(hdr->handle);
2754 flags = hci_flags(handle);
2755 handle = hci_handle(handle);
2756
2757 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2758
2759 hdev->stat.acl_rx++;
2760
2761 hci_dev_lock(hdev);
2762 conn = hci_conn_hash_lookup_handle(hdev, handle);
2763 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002764
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002766 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002769 l2cap_recv_acldata(conn, skb, flags);
2770 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002772 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 hdev->name, handle);
2774 }
2775
2776 kfree_skb(skb);
2777}
2778
2779/* SCO data packet */
2780static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2781{
2782 struct hci_sco_hdr *hdr = (void *) skb->data;
2783 struct hci_conn *conn;
2784 __u16 handle;
2785
2786 skb_pull(skb, HCI_SCO_HDR_SIZE);
2787
2788 handle = __le16_to_cpu(hdr->handle);
2789
2790 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2791
2792 hdev->stat.sco_rx++;
2793
2794 hci_dev_lock(hdev);
2795 conn = hci_conn_hash_lookup_handle(hdev, handle);
2796 hci_dev_unlock(hdev);
2797
2798 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002800 sco_recv_scodata(conn, skb);
2801 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002803 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804 hdev->name, handle);
2805 }
2806
2807 kfree_skb(skb);
2808}
2809
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002810static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002812 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 struct sk_buff *skb;
2814
2815 BT_DBG("%s", hdev->name);
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 while ((skb = skb_dequeue(&hdev->rx_q))) {
2818 if (atomic_read(&hdev->promisc)) {
2819 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002820 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821 }
2822
2823 if (test_bit(HCI_RAW, &hdev->flags)) {
2824 kfree_skb(skb);
2825 continue;
2826 }
2827
2828 if (test_bit(HCI_INIT, &hdev->flags)) {
2829 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002830 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 case HCI_ACLDATA_PKT:
2832 case HCI_SCODATA_PKT:
2833 kfree_skb(skb);
2834 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 }
2837
2838 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002839 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002841 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 hci_event_packet(hdev, skb);
2843 break;
2844
2845 case HCI_ACLDATA_PKT:
2846 BT_DBG("%s ACL data packet", hdev->name);
2847 hci_acldata_packet(hdev, skb);
2848 break;
2849
2850 case HCI_SCODATA_PKT:
2851 BT_DBG("%s SCO data packet", hdev->name);
2852 hci_scodata_packet(hdev, skb);
2853 break;
2854
2855 default:
2856 kfree_skb(skb);
2857 break;
2858 }
2859 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860}
2861
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002862static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002864 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002865 struct sk_buff *skb;
2866
2867 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2868
Linus Torvalds1da177e2005-04-16 15:20:36 -07002869 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002870 if (atomic_read(&hdev->cmd_cnt)) {
2871 skb = skb_dequeue(&hdev->cmd_q);
2872 if (!skb)
2873 return;
2874
Wei Yongjun7585b972009-02-25 18:29:52 +08002875 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002877 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2878 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 atomic_dec(&hdev->cmd_cnt);
2880 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002881 if (test_bit(HCI_RESET, &hdev->flags))
2882 del_timer(&hdev->cmd_timer);
2883 else
2884 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002885 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 } else {
2887 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002888 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 }
2890 }
2891}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002892
2893int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2894{
2895 /* General inquiry access code (GIAC) */
2896 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2897 struct hci_cp_inquiry cp;
2898
2899 BT_DBG("%s", hdev->name);
2900
2901 if (test_bit(HCI_INQUIRY, &hdev->flags))
2902 return -EINPROGRESS;
2903
Johan Hedberg46632622012-01-02 16:06:08 +02002904 inquiry_cache_flush(hdev);
2905
Andre Guedes2519a1f2011-11-07 11:45:24 -03002906 memset(&cp, 0, sizeof(cp));
2907 memcpy(&cp.lap, lap, sizeof(cp.lap));
2908 cp.length = length;
2909
2910 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2911}
Andre Guedes023d50492011-11-04 14:16:52 -03002912
2913int hci_cancel_inquiry(struct hci_dev *hdev)
2914{
2915 BT_DBG("%s", hdev->name);
2916
2917 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2918 return -EPERM;
2919
2920 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2921}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002922
2923module_param(enable_hs, bool, 0644);
2924MODULE_PARM_DESC(enable_hs, "Enable High Speed");