blob: dc31e7d6028e33ae1f409a52c453deaad380ae0c [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Fabio Estevam8b281b92012-01-10 18:33:50 -020058bool enable_hs;
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020059
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a notifier block on the global HCI notifier chain; it will be
 * called for HCI device events dispatched via hci_notify(). Returns the
 * atomic_notifier_chain_register() result (0 on success). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a previously registered notifier block from the global HCI
 * notifier chain. Returns the atomic_notifier_chain_unregister() result. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Fan @event out to every registered HCI notifier, passing @hdev as the
 * notifier data pointer. Uses the atomic chain, so callbacks must not sleep. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request: record @result and wake the
 * waiter sleeping in __hci_request(). @cmd is the opcode of the command
 * whose completion triggered this call. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only hand the result over if someone is actually waiting;
	 * late completions after timeout/cancel are dropped here. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which is expected to queue one or more HCI commands) and then
 * sleeps interruptibly for up to @timeout jiffies until hci_req_complete()
 * or hci_req_cancel() wakes us. Caller must hold the request lock
 * (see hci_request()). Returns 0 on success, a negative errno on failure,
 * -EINTR if interrupted by a signal, -ETIMEDOUT if nothing completed.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue *before* issuing the request
	 * so a fast completion cannot race past the sleep below. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as-is here;
	 * presumably a later completion is ignored by the PEND check
	 * in hci_req_complete() only after a new request resets it. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied: map HCI status to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request(): rejects requests while the
 * device is down and takes the per-device request lock so only one
 * synchronous request runs at a time. Same return values as __hci_request(),
 * plus -ENETDOWN when HCI_UP is not set. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback that issues HCI_OP_RESET. Sets HCI_RESET so the event
 * path knows a reset is in flight. @opt is unused (logged only). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the BR/EDR controller init command sequence. Commands are sent
 * asynchronously; their completions drive the init state machine via
 * hci_req_complete(). The order below follows the Bluetooth Core Spec
 * bring-up sequence and should not be reshuffled casually. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys (BDADDR_ANY + delete_all wildcard). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the minimal init sequence for an AMP (alternate MAC/PHY)
 * controller: reset plus local version read. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device initialization: first flush any
 * driver-supplied "special" command skbs into the command queue, then run
 * the type-specific init sequence (BR/EDR or AMP). @opt is unused. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	/* Queue was drained above; purge is belt-and-braces. */
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE-specific initialization: query the LE ACL
 * buffer parameters. @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: write the scan-enable setting. @opt carries the raw
 * scan bitmask (inquiry/page scan bits) truncated to a byte. */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: write the authentication-enable setting. @opt carries
 * the enable flag truncated to a byte. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write the encryption-mode setting. @opt carries the
 * mode truncated to a byte. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: write the default link policy. @opt carries the policy
 * bitmask, converted to little-endian wire order here. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
Andre Guedes6fbe1952012-02-03 17:47:58 -0300363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200367 return true;
368
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 default:
370 return false;
371 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200372}
373
/* Transition the discovery state machine to @state and emit mgmt
 * "discovering" events on the edges user space cares about. No-op if the
 * state is unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually began,
		 * so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
	case DISCOVERY_LE_SCAN:
		/* Discovery became active: notify user space. */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
400
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401static void inquiry_cache_flush(struct hci_dev *hdev)
402{
Johan Hedberg30883512012-01-04 14:16:21 +0200403 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200404 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405
Johan Hedberg561aafb2012-01-04 13:31:59 +0200406 list_for_each_entry_safe(p, n, &cache->all, all) {
407 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200408 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200410
411 INIT_LIST_HEAD(&cache->unknown);
412 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200413 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414}
415
416struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
417{
Johan Hedberg30883512012-01-04 14:16:21 +0200418 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700419 struct inquiry_entry *e;
420
421 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
422
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200425 return e;
426 }
427
428 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429}
430
/* Find @bdaddr on the "unknown" list (entries whose remote name has not
 * been resolved yet), or NULL if absent. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
446
/* Find an entry on the "resolve" list. Passing BDADDR_ANY as @bdaddr acts
 * as a wildcard that matches the first entry whose name_state equals
 * @state; otherwise match by address (and @state is ignored). Returns NULL
 * when nothing matches. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		/* Wildcard lookup: first entry in the given name state. */
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
465
/* Re-insert @ie into the resolve list keeping it sorted so that
 * stronger-signal (smaller |RSSI|) entries come first; entries already in
 * NAME_PENDING state keep their position at the front. Used to reprioritize
 * after an RSSI update. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with weaker or equal
	 * signal; insert just before it. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
484
/* Insert or refresh an inquiry result in the cache.
 *
 * @data is the parsed inquiry response; @name_known tells whether the
 * remote name is already known (so no name resolution is needed).
 * Returns true when the entry's name is known after the update, false
 * when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* An RSSI change while awaiting resolution re-sorts the
		 * resolve list so stronger devices are resolved first. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		/* Track it on the unknown list until the name resolves. */
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever secondary
	 * list (unknown/resolve) it was parked on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
534
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep (called under the device lock; see hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
562
/* Request callback: start an inquiry using the parameters in the
 * hci_inquiry_req passed through @opt. Skips silently if an inquiry is
 * already running (HCI_INQUIRY set). */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
579
/* HCIINQUIRY ioctl handler: run (or reuse a fresh cache for) an inquiry
 * and copy the results back to user space after the updated
 * hci_inquiry_req header. @arg points at a user-space hci_inquiry_req
 * followed by the result buffer. Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only re-run the inquiry if the cache is stale, empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per spec; 2000ms is a rounded-up
	 * per-unit wait for the synchronous request below. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the header (with the real num_rsp), then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
645
646/* ---- HCI ioctl helpers ---- */
647
/* Bring up the HCI device with index @dev: open the transport, run the
 * controller init sequence (unless the device is raw), and mark it UP.
 * On init failure the device is fully torn down again. Returns 0 or a
 * negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): this overwrites ret from the base init;
		 * an LE init result masks a prior BR/EDR init result. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP the power-on is reported later by the
		 * mgmt setup path, not here. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
734
/* Tear down an open HCI device: cancel pending work, flush caches and
 * connections, optionally reset the controller, drain all queues, and
 * close the transport. Idempotent — returns 0 immediately if the device
 * is already down. Caller holds a reference; the UP reference taken in
 * hci_dev_open() is dropped at the end. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Devices with the NO_RESET quirk skipped the reset at open time,
	 * so they get reset here on close instead. */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
819
820int hci_dev_close(__u16 dev)
821{
822 struct hci_dev *hdev;
823 int err;
824
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200825 hdev = hci_dev_get(dev);
826 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 return -ENODEV;
828 err = hci_dev_do_close(hdev);
829 hci_dev_put(hdev);
830 return err;
831}
832
/* Soft-reset a running HCI device: flush pending traffic and caches and,
 * unless the device is in raw mode, issue an HCI Reset command.
 *
 * Returns 0 on success (including the no-op case when the device is not
 * up), -ENODEV for an unknown index, or the __hci_request() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other requests on this device */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up; ret stays 0 */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush per-device state under the hdev lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters: one command credit, no data credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Raw-mode devices are driven by userspace; don't send HCI Reset */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
871
872int hci_dev_reset_stat(__u16 dev)
873{
874 struct hci_dev *hdev;
875 int ret = 0;
876
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200877 hdev = hci_dev_get(dev);
878 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 return -ENODEV;
880
881 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
882
883 hci_dev_put(hdev);
884
885 return ret;
886}
887
/* Handle device-configuration ioctls (HCISET*) from userspace.
 *
 * @cmd: ioctl number selecting the setting to change
 * @arg: user pointer to a struct hci_dev_req carrying dev_id and dev_opt
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown device, -EOPNOTSUPP/-EINVAL for unsupported settings, or the
 * error from the underlying HCI request.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		/* Enable/disable link-level authentication */
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		/* Set page/inquiry scan enable */
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: high half = MTU,
		 * low half = packet count (historical ABI layout) */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
962
/* HCIGETDEVLIST ioctl backend: copy the list of registered HCI devices
 * (id + flags per device) to userspace.
 *
 * @arg: user pointer to a struct hci_dev_list_req whose dev_num field
 *       gives the capacity of the trailing dev_req array.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -EINVAL for a
 * zero or oversized dev_num, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) userspace is taking over: abort the
		 * pending auto-power-off and mark the device pairable */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1009
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * one device and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown device id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy userspace touched the device: cancel the pending
	 * auto-power-off (synchronously, we may sleep here) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without a mgmt controller the device defaults to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low nibble, device type in the high */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1051
1052/* ---- Interface to HCI drivers ---- */
1053
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001054static int hci_rfkill_set_block(void *data, bool blocked)
1055{
1056 struct hci_dev *hdev = data;
1057
1058 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1059
1060 if (!blocked)
1061 return 0;
1062
1063 hci_dev_do_close(hdev);
1064
1065 return 0;
1066}
1067
/* Callbacks registered with the rfkill core for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1071
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072/* Alloc HCI device */
1073struct hci_dev *hci_alloc_dev(void)
1074{
1075 struct hci_dev *hdev;
1076
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001077 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 if (!hdev)
1079 return NULL;
1080
David Herrmann0ac7e702011-10-08 14:58:47 +02001081 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 skb_queue_head_init(&hdev->driver_init);
1083
1084 return hdev;
1085}
1086EXPORT_SYMBOL(hci_alloc_dev);
1087
/* Free HCI device allocated with hci_alloc_dev().
 *
 * Drops the embedded device reference; the actual memory is released
 * by the device core's release callback once the last reference goes.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1097
/* Workqueue handler: power on a device (scheduled at registration or by
 * mgmt). Opens the device and, if it was only brought up automatically,
 * arms a delayed power-off so an unclaimed device does not stay on.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are switched back off unless something
	 * claims them before the timeout fires */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1114
/* Delayed-work handler: power the device off again after the auto-on
 * grace period expired without anyone claiming it.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The grace period is over; drop the auto-off marker */
	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1126
/* Delayed-work handler: end a time-limited discoverable period by
 * switching scan mode back to page-scan only and clearing the timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;	/* page scan only: connectable, not discoverable */

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1144
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001145int hci_uuids_clear(struct hci_dev *hdev)
1146{
1147 struct list_head *p, *n;
1148
1149 list_for_each_safe(p, n, &hdev->uuids) {
1150 struct bt_uuid *uuid;
1151
1152 uuid = list_entry(p, struct bt_uuid, list);
1153
1154 list_del(p);
1155 kfree(uuid);
1156 }
1157
1158 return 0;
1159}
1160
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001161int hci_link_keys_clear(struct hci_dev *hdev)
1162{
1163 struct list_head *p, *n;
1164
1165 list_for_each_safe(p, n, &hdev->link_keys) {
1166 struct link_key *key;
1167
1168 key = list_entry(p, struct link_key, list);
1169
1170 list_del(p);
1171 kfree(key);
1172 }
1173
1174 return 0;
1175}
1176
/* Remove and free every stored SMP long term key for this controller.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1188
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001189struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1190{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001191 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001192
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001193 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001194 if (bacmp(bdaddr, &k->bdaddr) == 0)
1195 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001196
1197 return NULL;
1198}
1199
/* Decide whether a newly received link key should be stored persistently.
 *
 * @conn:         the connection the key came in on, or NULL for a
 *                security-mode-3 link-level pairing
 * @key_type:     type of the new key (HCI_LK_*)
 * @old_key_type: type of any previously stored key, 0xff if none
 *
 * Returns 1 if the key should persist across power cycles, 0 if it
 * should only be kept for the lifetime of the connection. The checks
 * form an ordered policy ladder - their sequence is significant.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1235
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001236struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001237{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001238 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 list_for_each_entry(k, &hdev->long_term_keys, list) {
1241 if (k->ediv != ediv ||
1242 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243 continue;
1244
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001245 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001246 }
1247
1248 return NULL;
1249}
1250EXPORT_SYMBOL(hci_find_ltk);
1251
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1253 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001254{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257 list_for_each_entry(k, &hdev->long_term_keys, list)
1258 if (addr_type == k->bdaddr_type &&
1259 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260 return k;
1261
1262 return NULL;
1263}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001264EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001265
/* Store (or update) a BR/EDR link key received from the controller.
 *
 * @conn:    connection the key belongs to, may be NULL (security mode 3)
 * @new_key: non-zero when this is a freshly created key that mgmt should
 *           be told about (as opposed to a key loaded from storage)
 * @val:     the 16-byte key value
 * @type:    HCI_LK_* key type as reported by the controller
 *
 * Returns 0 on success or -ENOMEM. Non-persistent keys are reported to
 * mgmt and then dropped again rather than kept in the list.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the persistence logic */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys were only needed for the mgmt event above */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1320
/* Store (or update) an SMP key for an LE remote device.
 *
 * @addr_type:     LE address type of the peer
 * @type:          HCI_SMP_STK or HCI_SMP_LTK (anything else is ignored)
 * @new_key:       non-zero for freshly distributed keys (triggers a mgmt
 *                 event for LTKs), zero when loading from storage
 * @authenticated: whether the key was created with MITM protection
 * @tk:            the 16-byte key value
 * @enc_size:      negotiated encryption key size
 * @ediv/@rand:    identifiers used to look the key up later
 *
 * Returns 0 on success (including the silently-ignored unknown type
 * case) or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only STKs and LTKs are stored here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1357
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001358int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359{
1360 struct link_key *key;
1361
1362 key = hci_find_link_key(hdev, bdaddr);
1363 if (!key)
1364 return -ENOENT;
1365
1366 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1367
1368 list_del(&key->list);
1369 kfree(key);
1370
1371 return 0;
1372}
1373
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001374int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1375{
1376 struct smp_ltk *k, *tmp;
1377
1378 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1379 if (bacmp(bdaddr, &k->bdaddr))
1380 continue;
1381
1382 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1383
1384 list_del(&k->list);
1385 kfree(k);
1386 }
1387
1388 return 0;
1389}
1390
/* HCI command timer function */
/* Fires when the controller failed to answer a command in time;
 * restores the single command credit and kicks the cmd work so the
 * queue does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1400
Szymon Janc2763eda2011-03-22 13:12:22 +01001401struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1402 bdaddr_t *bdaddr)
1403{
1404 struct oob_data *data;
1405
1406 list_for_each_entry(data, &hdev->remote_oob_data, list)
1407 if (bacmp(bdaddr, &data->bdaddr) == 0)
1408 return data;
1409
1410 return NULL;
1411}
1412
1413int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1414{
1415 struct oob_data *data;
1416
1417 data = hci_find_remote_oob_data(hdev, bdaddr);
1418 if (!data)
1419 return -ENOENT;
1420
1421 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1422
1423 list_del(&data->list);
1424 kfree(data);
1425
1426 return 0;
1427}
1428
/* Remove and free all stored remote out-of-band pairing data.
 * Always returns 0.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1440
1441int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1442 u8 *randomizer)
1443{
1444 struct oob_data *data;
1445
1446 data = hci_find_remote_oob_data(hdev, bdaddr);
1447
1448 if (!data) {
1449 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1450 if (!data)
1451 return -ENOMEM;
1452
1453 bacpy(&data->bdaddr, bdaddr);
1454 list_add(&data->list, &hdev->remote_oob_data);
1455 }
1456
1457 memcpy(data->hash, hash, sizeof(data->hash));
1458 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1459
1460 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1461
1462 return 0;
1463}
1464
Antti Julkub2a66aa2011-06-15 12:01:14 +03001465struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1466 bdaddr_t *bdaddr)
1467{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001468 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001469
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001470 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471 if (bacmp(bdaddr, &b->bdaddr) == 0)
1472 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001473
1474 return NULL;
1475}
1476
1477int hci_blacklist_clear(struct hci_dev *hdev)
1478{
1479 struct list_head *p, *n;
1480
1481 list_for_each_safe(p, n, &hdev->blacklist) {
1482 struct bdaddr_list *b;
1483
1484 b = list_entry(p, struct bdaddr_list, list);
1485
1486 list_del(p);
1487 kfree(b);
1488 }
1489
1490 return 0;
1491}
1492
/* Add a remote address to the device blacklist.
 *
 * @type: address type, forwarded to the mgmt event only (the list entry
 *        itself stores just the address in this version)
 *
 * Returns -EBADF for the wildcard address, -EEXIST if already listed,
 * -ENOMEM on allocation failure, otherwise the mgmt_device_blocked()
 * result.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is reserved (it means "clear" for del) */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1513
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001514int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001515{
1516 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001517
Szymon Janc1ec918c2011-11-16 09:32:21 +01001518 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001519 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001520
1521 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001522 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001523 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001524
1525 list_del(&entry->list);
1526 kfree(entry);
1527
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001528 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001529}
1530
/* Delayed-work handler: expire the LE advertising cache by dropping
 * every cached entry (taken with the hdev lock held).
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1542
/* Remove and free every cached LE advertising entry.
 * Always returns 0.
 */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1556
1557struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1558{
1559 struct adv_entry *entry;
1560
1561 list_for_each_entry(entry, &hdev->adv_entries, list)
1562 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1563 return entry;
1564
1565 return NULL;
1566}
1567
1568static inline int is_connectable_adv(u8 evt_type)
1569{
1570 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1571 return 1;
1572
1573 return 0;
1574}
1575
/* Cache the sender of a connectable LE advertising report so it can be
 * connected to later.
 *
 * Returns 0 on success (including the already-cached case), -EINVAL
 * for non-connectable event types, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1603
/* __hci_request callback: send LE Set Scan Parameters.
 *
 * @opt: pointer to a struct le_scan_params (cast through unsigned long
 *       by the request machinery); interval/window are in host order
 *       and converted to little endian here.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1616
/* __hci_request callback: send LE Set Scan Enable with enable=1
 * (filter_duplicates left at 0 by the memset).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1626
/* Start an LE scan: program the scan parameters, enable scanning and
 * arm the delayed work that will stop it after @timeout milliseconds.
 *
 * Runs synchronously (may sleep); called from the le_scan work item.
 * Returns -EINPROGRESS if a scan is already active, 0 on success, or
 * the __hci_request() error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-command timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* First set the parameters, then enable scanning */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scanning is on; schedule its automatic shutdown */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1660
/* Delayed-work handler: stop a running LE scan by sending LE Set Scan
 * Enable with all-zero parameters (enable=0).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	/* Zeroed command parameters mean "disable scanning" */
	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1673
/* Work handler: run the LE scan with the parameters stashed in
 * hdev->le_scan_params by hci_le_scan(). Executed on system_long_wq
 * because hci_do_le_scan() blocks on the HCI requests.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
				param->window, param->timeout);
}
1684
/* Request an LE scan: record the parameters on the hdev and hand the
 * blocking part off to the le_scan work item.
 *
 * @interval/@window: scan timing (controller units)
 * @timeout:          scan duration in milliseconds
 *
 * Returns -EINPROGRESS if a scan request is already queued or running,
 * 0 once the work has been queued.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
			int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the handler sleeps on HCI requests */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1704
/* Register HCI device.
 *
 * Allocates the next free device id (AMP controllers never get id 0,
 * see below), initializes all per-device state, creates the per-device
 * workqueue and sysfs entries, and schedules the initial power-on.
 *
 * Returns the assigned id on success or a negative errno on failure.
 * On failure the device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must provide at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id.  The list is kept sorted by id,
	 * so the first gap (or the tail) is the id to use; 'head' tracks
	 * the node to insert after. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Defaults until the controller reports its capabilities */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue (max_active = 1) so rx/tx/cmd work
	 * items for this controller never run concurrently */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: a failure leaves the device
	 * usable, just without rfkill support */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1836
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, shuts it down,
 * releases all cached state and drops the registration reference taken
 * with hci_dev_hold().  The final free happens when the last reference
 * is put.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Make the device invisible to new lookups first */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device was fully set up */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Must complete before the workqueue is destroyed below */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1889
/* Suspend HCI device.
 *
 * Only broadcasts HCI_DEV_SUSPEND to registered notifiers; no device
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1897
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts HCI_DEV_RESUME to
 * registered notifiers only.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1905
Marcel Holtmann76bca882009-11-18 00:40:39 +01001906/* Receive frame from HCI drivers */
1907int hci_recv_frame(struct sk_buff *skb)
1908{
1909 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1910 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1911 && !test_bit(HCI_INIT, &hdev->flags))) {
1912 kfree_skb(skb);
1913 return -ENXIO;
1914 }
1915
1916 /* Incomming skb */
1917 bt_cb(skb)->incoming = 1;
1918
1919 /* Time stamp */
1920 __net_timestamp(skb);
1921
Marcel Holtmann76bca882009-11-18 00:40:39 +01001922 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001923 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001924
Marcel Holtmann76bca882009-11-18 00:40:39 +01001925 return 0;
1926}
1927EXPORT_SYMBOL(hci_recv_frame);
1928
/* Core packet reassembly helper.
 *
 * Accumulates up to @count bytes from @data into the partially built
 * frame stored in hdev->reassembly[@index], allocating a fresh skb when
 * no frame is pending.  Once the packet header has been received, the
 * expected payload length is read from it; when the frame is complete
 * it is handed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ for bad type/index, -ENOMEM on allocation failure or
 * if the advertised payload would not fit the preallocated skb).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame pending: allocate one sized for the worst case
		 * of this packet type and expect its header first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * finish the current header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If we just completed the header, read the payload length
		 * out of it and sanity-check it against the skb tailroom */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2037
Marcel Holtmannef222012007-07-11 06:42:04 +02002038int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2039{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302040 int rem = 0;
2041
Marcel Holtmannef222012007-07-11 06:42:04 +02002042 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2043 return -EILSEQ;
2044
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002045 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002046 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302047 if (rem < 0)
2048 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002049
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302050 data += (count - rem);
2051 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002052 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002053
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302054 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002055}
2056EXPORT_SYMBOL(hci_recv_fragment);
2057
Suraj Sumangala99811512010-07-14 13:02:19 +05302058#define STREAM_REASSEMBLY 0
2059
2060int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2061{
2062 int type;
2063 int rem = 0;
2064
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002065 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302066 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2067
2068 if (!skb) {
2069 struct { char type; } *pkt;
2070
2071 /* Start of the frame */
2072 pkt = data;
2073 type = pkt->type;
2074
2075 data++;
2076 count--;
2077 } else
2078 type = bt_cb(skb)->pkt_type;
2079
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002080 rem = hci_reassembly(hdev, type, data, count,
2081 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302082 if (rem < 0)
2083 return rem;
2084
2085 data += (count - rem);
2086 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002087 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302088
2089 return rem;
2090}
2091EXPORT_SYMBOL(hci_recv_stream_fragment);
2092
/* ---- Interface to upper protocols ---- */

/* Register an upper-layer callback structure.
 *
 * Adds @cb to the global hci_cb_list under hci_cb_list_lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2106
/* Unregister an upper-layer callback structure previously added with
 * hci_register_cb().  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2118
2119static int hci_send_frame(struct sk_buff *skb)
2120{
2121 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2122
2123 if (!hdev) {
2124 kfree_skb(skb);
2125 return -ENODEV;
2126 }
2127
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002128 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130 if (atomic_read(&hdev->promisc)) {
2131 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002132 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002134 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 }
2136
2137 /* Get rid of skb owner, prior to sending to the driver. */
2138 skb_orphan(skb);
2139
2140 return hdev->send(skb);
2141}
2142
/* Send HCI command.
 *
 * Builds a command packet (header + @plen bytes of @param) and queues
 * it on cmd_q for the command worker.  The allocation is GFP_ATOMIC
 * since callers may be in non-sleepable context.
 *
 * Returns 0 when queued, -ENOMEM when the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Wire format: little-endian opcode followed by parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so its completion
	 * can drive the init sequence */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
2179/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002180void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181{
2182 struct hci_command_hdr *hdr;
2183
2184 if (!hdev->sent_cmd)
2185 return NULL;
2186
2187 hdr = (void *) hdev->sent_cmd->data;
2188
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002189 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 return NULL;
2191
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002192 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193
2194 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2195}
2196
2197/* Send ACL data */
2198static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2199{
2200 struct hci_acl_hdr *hdr;
2201 int len = skb->len;
2202
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002203 skb_push(skb, HCI_ACL_HDR_SIZE);
2204 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002205 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002206 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2207 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
2209
/* Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue.  The first fragment already carries an ACL_START header; the
 * remaining fragments get ACL_CONT headers here.  All fragments are
 * enqueued under the queue lock so the TX scheduler never sees a
 * partially queued frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * individual skb below */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2250
/* Queue ACL data on a channel for transmission.
 *
 * Adds the ACL_START header to @skb, queues it (with any fragments) on
 * the channel's data queue and kicks the TX worker.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2267
/* Send SCO data.
 *
 * Builds the SCO header on the stack, prepends it to @skb, queues the
 * frame on the connection's data queue and kicks the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the header in front of the payload */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2290
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler.
 *
 * Picks, among the connections of @type that have queued data and are
 * in a sendable state, the one with the fewest in-flight packets
 * (fairness), and computes in *quote how many packets it may send:
 * controller buffer credits divided by the number of eligible
 * connections, minimum 1.  Returns NULL with *quote = 0 when nothing
 * is eligible.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the credit pool matching the link type; LE falls
		 * back to ACL credits when the controller has no
		 * separate LE buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2352
/* Link TX timeout handler: the controller stopped returning buffer
 * credits.  Disconnect every connection of @type that still has
 * unacknowledged packets, using reason 0x13 (remote user terminated
 * connection), to reclaim the stuck credits.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2373
/* Channel-aware variant of hci_low_sent().
 *
 * Scans every channel of every sendable connection of @type and picks
 * the channel whose head skb has the highest priority; among channels
 * at that priority, the one on the connection with the fewest
 * in-flight packets wins.  *quote is the per-round packet budget
 * (credits / eligible channels, minimum 1).  Returns NULL when no
 * channel has queued data.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness tie-break: fewest in-flight packets */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool matching the link type (LE may share the
	 * ACL pool) */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2452
/* Anti-starvation pass run after a scheduling round.
 *
 * For every channel of @type that sent nothing this round (sent == 0)
 * but still has queued data, promote the priority of its head skb
 * towards HCI_PRIO_MAX - 1 so it eventually beats busier channels in
 * hci_chan_sent().  Channels that did send get their counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset its
			 * per-round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2502
/* Number of controller data blocks consumed by @skb's ACL payload
 * (the ACL header does not count against the block budget). */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2508
/* If we are out of TX credits (@cnt == 0) and the controller has not
 * acknowledged anything for HCI_ACL_TX_TIMEOUT since the last ACL
 * transmission, treat the link as stalled and tear down the offending
 * connections.  Skipped for raw (HCI_RAW) devices.
 */
static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002520static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2521{
2522 unsigned int cnt = hdev->acl_cnt;
2523 struct hci_chan *chan;
2524 struct sk_buff *skb;
2525 int quote;
2526
2527 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002528
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002529 while (hdev->acl_cnt &&
2530 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002531 u32 priority = (skb_peek(&chan->data_q))->priority;
2532 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002533 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2534 skb->len, skb->priority);
2535
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002536 /* Stop if priority has changed */
2537 if (skb->priority < priority)
2538 break;
2539
2540 skb = skb_dequeue(&chan->data_q);
2541
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002542 hci_conn_enter_active_mode(chan->conn,
2543 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002544
Linus Torvalds1da177e2005-04-16 15:20:36 -07002545 hci_send_frame(skb);
2546 hdev->acl_last_tx = jiffies;
2547
2548 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002549 chan->sent++;
2550 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002551 }
2552 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002553
2554 if (cnt != hdev->acl_cnt)
2555 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556}
2557
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002558static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2559{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002560 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002561 struct hci_chan *chan;
2562 struct sk_buff *skb;
2563 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002564
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002565 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002566
2567 while (hdev->block_cnt > 0 &&
2568 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2569 u32 priority = (skb_peek(&chan->data_q))->priority;
2570 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2571 int blocks;
2572
2573 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2574 skb->len, skb->priority);
2575
2576 /* Stop if priority has changed */
2577 if (skb->priority < priority)
2578 break;
2579
2580 skb = skb_dequeue(&chan->data_q);
2581
2582 blocks = __get_blocks(hdev, skb);
2583 if (blocks > hdev->block_cnt)
2584 return;
2585
2586 hci_conn_enter_active_mode(chan->conn,
2587 bt_cb(skb)->force_active);
2588
2589 hci_send_frame(skb);
2590 hdev->acl_last_tx = jiffies;
2591
2592 hdev->block_cnt -= blocks;
2593 quote -= blocks;
2594
2595 chan->sent += blocks;
2596 chan->conn->sent += blocks;
2597 }
2598 }
2599
2600 if (cnt != hdev->block_cnt)
2601 hci_prio_recalculate(hdev, ACL_LINK);
2602}
2603
2604static inline void hci_sched_acl(struct hci_dev *hdev)
2605{
2606 BT_DBG("%s", hdev->name);
2607
2608 if (!hci_conn_num(hdev, ACL_LINK))
2609 return;
2610
2611 switch (hdev->flow_ctl_mode) {
2612 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2613 hci_sched_acl_pkt(hdev);
2614 break;
2615
2616 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2617 hci_sched_acl_blk(hdev);
2618 break;
2619 }
2620}
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622/* Schedule SCO */
2623static inline void hci_sched_sco(struct hci_dev *hdev)
2624{
2625 struct hci_conn *conn;
2626 struct sk_buff *skb;
2627 int quote;
2628
2629 BT_DBG("%s", hdev->name);
2630
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002631 if (!hci_conn_num(hdev, SCO_LINK))
2632 return;
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2635 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2636 BT_DBG("skb %p len %d", skb, skb->len);
2637 hci_send_frame(skb);
2638
2639 conn->sent++;
2640 if (conn->sent == ~0)
2641 conn->sent = 0;
2642 }
2643 }
2644}
2645
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002646static inline void hci_sched_esco(struct hci_dev *hdev)
2647{
2648 struct hci_conn *conn;
2649 struct sk_buff *skb;
2650 int quote;
2651
2652 BT_DBG("%s", hdev->name);
2653
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002654 if (!hci_conn_num(hdev, ESCO_LINK))
2655 return;
2656
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002657 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2658 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2659 BT_DBG("skb %p len %d", skb, skb->len);
2660 hci_send_frame(skb);
2661
2662 conn->sent++;
2663 if (conn->sent == ~0)
2664 conn->sent = 0;
2665 }
2666 }
2667}
2668
/* LE scheduler.  Controllers with a dedicated LE buffer pool use
 * le_cnt/le_pkts; otherwise LE traffic shares the ACL credit pool
 * (acl_cnt), which is why the result is written back to one or the
 * other at the end.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers if present, else ACL. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only frames at the priority of the queue head are sent
		 * this round; a lower-priority frame ends the burst. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to whichever pool they came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: let starved channels be promoted. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2719
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002720static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002722 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 struct sk_buff *skb;
2724
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002725 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2726 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727
2728 /* Schedule queues and send stuff to HCI driver */
2729
2730 hci_sched_acl(hdev);
2731
2732 hci_sched_sco(hdev);
2733
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002734 hci_sched_esco(hdev);
2735
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002736 hci_sched_le(hdev);
2737
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 /* Send next queued raw (unknown type) packet */
2739 while ((skb = skb_dequeue(&hdev->raw_q)))
2740 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741}
2742
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002743/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744
2745/* ACL data packet */
2746static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2747{
2748 struct hci_acl_hdr *hdr = (void *) skb->data;
2749 struct hci_conn *conn;
2750 __u16 handle, flags;
2751
2752 skb_pull(skb, HCI_ACL_HDR_SIZE);
2753
2754 handle = __le16_to_cpu(hdr->handle);
2755 flags = hci_flags(handle);
2756 handle = hci_handle(handle);
2757
2758 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2759
2760 hdev->stat.acl_rx++;
2761
2762 hci_dev_lock(hdev);
2763 conn = hci_conn_hash_lookup_handle(hdev, handle);
2764 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002765
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002767 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002768
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002770 l2cap_recv_acldata(conn, skb, flags);
2771 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002773 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 hdev->name, handle);
2775 }
2776
2777 kfree_skb(skb);
2778}
2779
2780/* SCO data packet */
2781static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2782{
2783 struct hci_sco_hdr *hdr = (void *) skb->data;
2784 struct hci_conn *conn;
2785 __u16 handle;
2786
2787 skb_pull(skb, HCI_SCO_HDR_SIZE);
2788
2789 handle = __le16_to_cpu(hdr->handle);
2790
2791 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2792
2793 hdev->stat.sco_rx++;
2794
2795 hci_dev_lock(hdev);
2796 conn = hci_conn_hash_lookup_handle(hdev, handle);
2797 hci_dev_unlock(hdev);
2798
2799 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002800 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002801 sco_recv_scodata(conn, skb);
2802 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002804 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 hdev->name, handle);
2806 }
2807
2808 kfree_skb(skb);
2809}
2810
/* RX worker: drains hdev->rx_q and routes each frame.  Ordering matters:
 * promiscuous sockets get a copy first, then RAW/INIT filtering, then
 * dispatch by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* RAW devices bypass the stack entirely; drop the frame
		 * here (the promisc copy above already went out). */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: nothing can consume it. */
			kfree_skb(skb);
			break;
		}
	}
}
2862
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002863static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002865 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 struct sk_buff *skb;
2867
2868 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2869
Linus Torvalds1da177e2005-04-16 15:20:36 -07002870 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002871 if (atomic_read(&hdev->cmd_cnt)) {
2872 skb = skb_dequeue(&hdev->cmd_q);
2873 if (!skb)
2874 return;
2875
Wei Yongjun7585b972009-02-25 18:29:52 +08002876 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002878 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2879 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 atomic_dec(&hdev->cmd_cnt);
2881 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002882 if (test_bit(HCI_RESET, &hdev->flags))
2883 del_timer(&hdev->cmd_timer);
2884 else
2885 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002886 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002887 } else {
2888 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002889 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 }
2891 }
2892}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002893
2894int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2895{
2896 /* General inquiry access code (GIAC) */
2897 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2898 struct hci_cp_inquiry cp;
2899
2900 BT_DBG("%s", hdev->name);
2901
2902 if (test_bit(HCI_INQUIRY, &hdev->flags))
2903 return -EINPROGRESS;
2904
Johan Hedberg46632622012-01-02 16:06:08 +02002905 inquiry_cache_flush(hdev);
2906
Andre Guedes2519a1f2011-11-07 11:45:24 -03002907 memset(&cp, 0, sizeof(cp));
2908 memcpy(&cp.lap, lap, sizeof(cp.lap));
2909 cp.length = length;
2910
2911 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2912}
Andre Guedes023d50492011-11-04 14:16:52 -03002913
2914int hci_cancel_inquiry(struct hci_dev *hdev)
2915{
2916 BT_DBG("%s", hdev->name);
2917
2918 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2919 return -EPERM;
2920
2921 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2922}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002923
/* Module parameter toggling the High Speed feature (writable at runtime
 * via sysfs, mode 0644). */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");