/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
/* Delay in ms before an automatically powered-on controller is powered
 * back off again (presumably used by the power_off delayed work declared
 * on struct hci_dev — the user is not visible in this chunk). */
#define AUTO_OFF_TIMEOUT	2000

/* When false, hci_dev_open() treats every non BR/EDR controller (e.g.
 * AMP) as a raw device. */
bool enable_hs;

/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list: atomic notifier chain fired by hci_notify() */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a notifier block that will be invoked for HCI device events
 * broadcast through hci_notify(). Returns the notifier-chain result. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a notifier block previously added with hci_register_notifier(). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) for @hdev to every
 * registered HCI notifier. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request: record @result (an HCI status
 * code) and wake the thread sleeping in __hci_request(). */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	/* Only hand over the result if someone is actually waiting. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Abort a pending synchronous request with @err (a positive errno value;
 * __hci_request() negates it for the HCI_REQ_CANCELED case) and wake the
 * waiter. No-op if no request is pending. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps interruptibly for up
 * to @timeout jiffies until hci_req_complete()/hci_req_cancel() flips
 * hdev->req_status. Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno translated from the HCI status
 * on completion, the negated cancel reason, -ETIMEDOUT on timeout, or
 * -EINTR if a signal interrupted the wait.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before issuing the request so a fast completion
	 * cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): this early return leaves req_status as HCI_REQ_PEND
	 * (the reset below is skipped) — confirm a later completion cannot
	 * race with the next request. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to -errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request(): takes the per-device request
 * lock so only one synchronous request runs at a time. Fails with
 * -ENETDOWN when the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: issue an HCI Reset. HCI_RESET is set first so the
 * completion path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the BR/EDR controller init command sequence (called from
 * hci_init_req() during HCI_INIT). Commands are fire-and-forget here;
 * their completions drive the init phase via hci_req_complete(). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the minimal init sequence for an AMP controller: reset plus a
 * local-version read. AMP controllers use block-based flow control. */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device bring-up: first flush any driver-supplied
 * init commands into the command queue, then run the type-specific init
 * sequence (BR/EDR or AMP). */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver in hdev->driver_init are
	 * tagged as HCI commands and handed to the command worker. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
286
/* Request callback for LE-specific init: query the LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
/* Get HCI device by index.
 * Device is held on return (caller must drop with hci_dev_put());
 * returns NULL for a negative or unknown index. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; the reference
	 * is taken before the lock is dropped so the device cannot go away. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_RESOLVING)
365 return true;
366
367 return false;
368}
369
/* Move the discovery state machine to @state, notifying the management
 * interface when discovery effectively stops (STOPPED) or starts
 * (INQUIRY). Transitions to the same state are ignored. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_INQUIRY:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
394
/* Free every inquiry-cache entry and reset the discovery lists/state.
 * Caller is expected to hold the device lock (callers in this file take
 * hci_dev_lock() around it). */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* ->all owns the entries; freeing via it covers every list. */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
	cache->state = DISCOVERY_STOPPED;
}
409
/* Look up @bdaddr in the full inquiry cache (->all list).
 * Returns the entry or NULL; no reference counting on entries. */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
424
/* Look up @bdaddr among entries whose remote name is not yet known
 * (->unknown list). Returns the entry or NULL. */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
440
/* Look up an entry on the name-resolve list (->resolve).
 * With @bdaddr == BDADDR_ANY, returns the first entry whose name_state
 * equals @state; otherwise matches on the exact address. Returns NULL
 * if nothing matches. */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
							bdaddr_t *bdaddr,
							int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
459
/* Re-insert @ie into the ->resolve list at its sorted position after its
 * RSSI changed. The walk keeps entries ordered by ascending |rssi| (for
 * negative RSSI values a smaller magnitude means a stronger signal, so
 * stronger devices end up earlier); NAME_PENDING entries are skipped
 * past rather than displaced. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last node after which @ie should be inserted. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
478
/* Insert or refresh an inquiry-cache entry for @data->bdaddr.
 *
 * @name_known: caller already knows the remote name (so the entry need
 * not go on the ->unknown list).
 *
 * Returns true when the entry's name is known or pending resolution,
 * false when the name is still unknown or allocation failed — callers
 * can presumably use this to decide whether name resolution is needed
 * (TODO confirm against the event-handler callers, not in this chunk).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* An entry awaiting name resolution whose RSSI changed must
		 * be re-sorted within the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and take it off the
	 * unknown/resolve list it currently sits on (but leave entries
	 * whose resolution is already in flight alone). */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
528
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries written. Must not
 * sleep (runs under the device lock — see hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
556
/* Request callback: start an inquiry with the parameters packed into
 * @opt (a pointer to the caller's struct hci_inquiry_req). Skipped if
 * an inquiry is already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
573
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or the caller asked for a flush), then copy the
 * cached results back to user space following the request structure.
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether the cache needs refreshing. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in inquiry-length units; 2000ms per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header followed by the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
639
640/* ---- HCI ioctl helpers ---- */
641
/* Bring up HCI device @dev: open the transport, run the init request
 * sequence (unless the device is raw), and on success mark it HCI_UP
 * and notify listeners/mgmt. On init failure the device is torn back
 * down. Returns 0 or a negative errno (-ENODEV, -ERFKILL, -EALREADY,
 * -EIO, or an init-request error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): this overwrites any error from the main init
		 * request with the LE init result — confirm that is intended
		 * rather than only running LE init when ret == 0. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report powered-on to mgmt while still in setup. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: quiesce workers, drop queued
		 * packets, and close the transport again. */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
728
/* Tear down an open HCI device: cancel pending work, flush connections
 * and caches, reset the controller, drain all queues, and close the
 * transport. Safe to call on an already-down device (returns 0 early).
 * The teardown order matters — workers are flushed before queues are
 * purged, and the final cmd_work flush happens after the reset request. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel delayed works that only make sense while powered. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
808
/* ioctl-facing close: resolve @dev to a device, run the real teardown,
 * and drop the lookup reference. Returns 0 or -ENODEV. */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}
821
/* Reset the HCI device identified by index @dev.
 *
 * Drops all pending RX/command traffic, flushes the inquiry cache and
 * the connection hash, resets command/ACL/SCO/LE flow-control counters
 * and, unless the device runs in raw mode, sends an HCI_Reset to the
 * controller.  A device that is not HCI_UP is left untouched.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other requests on this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver discard its own buffered data, if it can */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm command flow control and zero the per-link credits */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
860
861int hci_dev_reset_stat(__u16 dev)
862{
863 struct hci_dev *hdev;
864 int ret = 0;
865
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200866 hdev = hci_dev_get(dev);
867 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 return -ENODEV;
869
870 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
871
872 hci_dev_put(hdev);
873
874 return ret;
875}
876
/* Handle device-configuration ioctls (HCISET*) from user space.
 *
 * @cmd: ioctl number
 * @arg: user pointer to a struct hci_dev_req (dev_id + dev_opt)
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high and packet count in the
		 * low 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
951
/* Copy the list of registered HCI devices to user space.
 *
 * @arg points at a struct hci_dev_list_req whose dev_num field tells
 * how many entries the caller's buffer can hold (bounded to two pages
 * worth of entries).  On return, dev_num holds the number of entries
 * actually filled in.  Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A device that user space enumerates is considered in
		 * use: cancel any pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) interfaces keep devices pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
998
/* Fill a struct hci_dev_info for one device and copy it to user space.
 *
 * @arg points at a struct hci_dev_info whose dev_id selects the device.
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying a device counts as use: cancel pending auto
	 * power-off (synchronously, we may sleep here) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) interfaces keep devices pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low and device type in the high
	 * nibble */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1040
1041/* ---- Interface to HCI drivers ---- */
1042
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001043static int hci_rfkill_set_block(void *data, bool blocked)
1044{
1045 struct hci_dev *hdev = data;
1046
1047 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1048
1049 if (!blocked)
1050 return 0;
1051
1052 hci_dev_do_close(hdev);
1053
1054 return 0;
1055}
1056
/* rfkill operations: only the block/unblock hook is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1060
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061/* Alloc HCI device */
1062struct hci_dev *hci_alloc_dev(void)
1063{
1064 struct hci_dev *hdev;
1065
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001066 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067 if (!hdev)
1068 return NULL;
1069
David Herrmann0ac7e702011-10-08 14:58:47 +02001070 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001071 skb_queue_head_init(&hdev->driver_init);
1072
1073 return hdev;
1074}
1075EXPORT_SYMBOL(hci_alloc_dev);
1076
/* Release an HCI device allocated with hci_alloc_dev().  Purges the
 * driver init queue and drops the embedded device reference; the
 * actual memory is freed by the device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1086
/* Work handler that powers a device on.  If the device was brought up
 * automatically (HCI_AUTO_OFF set) an auto power-off is scheduled; on
 * first-time setup completion the management interface is notified.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1103
/* Delayed-work handler that powers a device back off after the
 * auto-off timeout expired without anyone claiming the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The pending auto-off is being consumed right now */
	clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_dev_close(hdev->id);
}
1115
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001116static void hci_discov_off(struct work_struct *work)
1117{
1118 struct hci_dev *hdev;
1119 u8 scan = SCAN_PAGE;
1120
1121 hdev = container_of(work, struct hci_dev, discov_off.work);
1122
1123 BT_DBG("%s", hdev->name);
1124
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001125 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001126
1127 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1128
1129 hdev->discov_timeout = 0;
1130
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001131 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001132}
1133
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001134int hci_uuids_clear(struct hci_dev *hdev)
1135{
1136 struct list_head *p, *n;
1137
1138 list_for_each_safe(p, n, &hdev->uuids) {
1139 struct bt_uuid *uuid;
1140
1141 uuid = list_entry(p, struct bt_uuid, list);
1142
1143 list_del(p);
1144 kfree(uuid);
1145 }
1146
1147 return 0;
1148}
1149
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001150int hci_link_keys_clear(struct hci_dev *hdev)
1151{
1152 struct list_head *p, *n;
1153
1154 list_for_each_safe(p, n, &hdev->link_keys) {
1155 struct link_key *key;
1156
1157 key = list_entry(p, struct link_key, list);
1158
1159 list_del(p);
1160 kfree(key);
1161 }
1162
1163 return 0;
1164}
1165
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001166int hci_smp_ltks_clear(struct hci_dev *hdev)
1167{
1168 struct smp_ltk *k, *tmp;
1169
1170 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1171 list_del(&k->list);
1172 kfree(k);
1173 }
1174
1175 return 0;
1176}
1177
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001178struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1179{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001180 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001181
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001182 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001183 if (bacmp(bdaddr, &k->bdaddr) == 0)
1184 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001185
1186 return NULL;
1187}
1188
/* Decide whether a link key should be stored persistently.
 *
 * @conn may be NULL (security mode 3 case).  Returns 1 when the key
 * should be kept across reboots, 0 when it must be discarded after
 * the connection.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
							u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1224
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001225struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001227 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 list_for_each_entry(k, &hdev->long_term_keys, list) {
1230 if (k->ediv != ediv ||
1231 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232 continue;
1233
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001234 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001235 }
1236
1237 return NULL;
1238}
1239EXPORT_SYMBOL(hci_find_ltk);
1240
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001241struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1242 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001244 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001245
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001246 list_for_each_entry(k, &hdev->long_term_keys, list)
1247 if (addr_type == k->bdaddr_type &&
1248 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001249 return k;
1250
1251 return NULL;
1252}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001253EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001254
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL.  @new_key tells whether the key was freshly
 * created by the controller (as opposed to a key lookup replay); only
 * then is the management interface notified and the persistence
 * policy evaluated.  Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1309
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * Keys whose @type is neither STK nor LTK are silently ignored.  The
 * management interface is only told about freshly distributed LTKs
 * (@new_key set and type includes HCI_SMP_LTK).  Returns 0 on success
 * or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1346
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001347int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1348{
1349 struct link_key *key;
1350
1351 key = hci_find_link_key(hdev, bdaddr);
1352 if (!key)
1353 return -ENOENT;
1354
1355 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1356
1357 list_del(&key->list);
1358 kfree(key);
1359
1360 return 0;
1361}
1362
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001363int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1364{
1365 struct smp_ltk *k, *tmp;
1366
1367 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1368 if (bacmp(bdaddr, &k->bdaddr))
1369 continue;
1370
1371 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1372
1373 list_del(&k->list);
1374 kfree(k);
1375 }
1376
1377 return 0;
1378}
1379
/* HCI command timer function.  Fires when the controller failed to
 * answer an HCI command in time: logs the timeout, restores one
 * command credit and kicks the command work so queued commands can
 * proceed.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1389
Szymon Janc2763eda2011-03-22 13:12:22 +01001390struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1391 bdaddr_t *bdaddr)
1392{
1393 struct oob_data *data;
1394
1395 list_for_each_entry(data, &hdev->remote_oob_data, list)
1396 if (bacmp(bdaddr, &data->bdaddr) == 0)
1397 return data;
1398
1399 return NULL;
1400}
1401
1402int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1403{
1404 struct oob_data *data;
1405
1406 data = hci_find_remote_oob_data(hdev, bdaddr);
1407 if (!data)
1408 return -ENOENT;
1409
1410 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1411
1412 list_del(&data->list);
1413 kfree(data);
1414
1415 return 0;
1416}
1417
1418int hci_remote_oob_data_clear(struct hci_dev *hdev)
1419{
1420 struct oob_data *data, *n;
1421
1422 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1423 list_del(&data->list);
1424 kfree(data);
1425 }
1426
1427 return 0;
1428}
1429
1430int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1431 u8 *randomizer)
1432{
1433 struct oob_data *data;
1434
1435 data = hci_find_remote_oob_data(hdev, bdaddr);
1436
1437 if (!data) {
1438 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1439 if (!data)
1440 return -ENOMEM;
1441
1442 bacpy(&data->bdaddr, bdaddr);
1443 list_add(&data->list, &hdev->remote_oob_data);
1444 }
1445
1446 memcpy(data->hash, hash, sizeof(data->hash));
1447 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1448
1449 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1450
1451 return 0;
1452}
1453
Antti Julkub2a66aa2011-06-15 12:01:14 +03001454struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1455 bdaddr_t *bdaddr)
1456{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001457 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001458
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001459 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001460 if (bacmp(bdaddr, &b->bdaddr) == 0)
1461 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462
1463 return NULL;
1464}
1465
1466int hci_blacklist_clear(struct hci_dev *hdev)
1467{
1468 struct list_head *p, *n;
1469
1470 list_for_each_safe(p, n, &hdev->blacklist) {
1471 struct bdaddr_list *b;
1472
1473 b = list_entry(p, struct bdaddr_list, list);
1474
1475 list_del(p);
1476 kfree(b);
1477 }
1478
1479 return 0;
1480}
1481
1482int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1483{
1484 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485
1486 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1487 return -EBADF;
1488
Antti Julku5e762442011-08-25 16:48:02 +03001489 if (hci_blacklist_lookup(hdev, bdaddr))
1490 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001491
1492 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001493 if (!entry)
1494 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495
1496 bacpy(&entry->bdaddr, bdaddr);
1497
1498 list_add(&entry->list, &hdev->blacklist);
1499
Johan Hedberg744cf192011-11-08 20:40:14 +02001500 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001501}
1502
1503int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1504{
1505 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506
Szymon Janc1ec918c2011-11-16 09:32:21 +01001507 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001508 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509
1510 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001511 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001512 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513
1514 list_del(&entry->list);
1515 kfree(entry);
1516
Johan Hedberg744cf192011-11-08 20:40:14 +02001517 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001518}
1519
/* Delayed-work handler that empties the LE advertising entries cache
 * under the device lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1531
Andre Guedes76c86862011-05-26 16:23:50 -03001532int hci_adv_entries_clear(struct hci_dev *hdev)
1533{
1534 struct adv_entry *entry, *tmp;
1535
1536 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1537 list_del(&entry->list);
1538 kfree(entry);
1539 }
1540
1541 BT_DBG("%s adv cache cleared", hdev->name);
1542
1543 return 0;
1544}
1545
1546struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1547{
1548 struct adv_entry *entry;
1549
1550 list_for_each_entry(entry, &hdev->adv_entries, list)
1551 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1552 return entry;
1553
1554 return NULL;
1555}
1556
1557static inline int is_connectable_adv(u8 evt_type)
1558{
1559 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1560 return 1;
1561
1562 return 0;
1563}
1564
1565int hci_add_adv_entry(struct hci_dev *hdev,
1566 struct hci_ev_le_advertising_info *ev)
1567{
1568 struct adv_entry *entry;
1569
1570 if (!is_connectable_adv(ev->evt_type))
1571 return -EINVAL;
1572
1573 /* Only new entries should be added to adv_entries. So, if
1574 * bdaddr was found, don't add it. */
1575 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1576 return 0;
1577
Andre Guedes4777bfd2012-01-30 23:31:28 -03001578 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001579 if (!entry)
1580 return -ENOMEM;
1581
1582 bacpy(&entry->bdaddr, &ev->bdaddr);
1583 entry->bdaddr_type = ev->bdaddr_type;
1584
1585 list_add(&entry->list, &hdev->adv_entries);
1586
1587 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1588 batostr(&entry->bdaddr), entry->bdaddr_type);
1589
1590 return 0;
1591}
1592
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593/* Register HCI device */
1594int hci_register_dev(struct hci_dev *hdev)
1595{
1596 struct list_head *head = &hci_dev_list, *p;
Mat Martineau08add512011-11-02 16:18:36 -07001597 int i, id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598
David Herrmanne9b9cfa2012-01-07 15:47:22 +01001599 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600
David Herrmann010666a2012-01-07 15:47:07 +01001601 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 return -EINVAL;
1603
Mat Martineau08add512011-11-02 16:18:36 -07001604 /* Do not allow HCI_AMP devices to register at index 0,
1605 * so the index can be used as the AMP controller ID.
1606 */
1607 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1608
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001609 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
1611 /* Find first available device id */
1612 list_for_each(p, &hci_dev_list) {
1613 if (list_entry(p, struct hci_dev, list)->id != id)
1614 break;
1615 head = p; id++;
1616 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001617
Linus Torvalds1da177e2005-04-16 15:20:36 -07001618 sprintf(hdev->name, "hci%d", id);
1619 hdev->id = id;
Andrei Emeltchenkoc6feeb22011-11-16 17:30:20 +02001620 list_add_tail(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001622 mutex_init(&hdev->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
1624 hdev->flags = 0;
Andre Guedesd23264a2011-11-25 20:53:38 -03001625 hdev->dev_flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001626 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
Marcel Holtmann5b7f99092007-07-11 09:51:55 +02001627 hdev->esco_type = (ESCO_HV1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 hdev->link_mode = (HCI_LM_ACCEPT);
Johan Hedberg17fa4b92011-01-25 13:28:33 +02001629 hdev->io_capability = 0x03; /* No Input No Output */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Marcel Holtmann04837f62006-07-03 10:02:33 +02001631 hdev->idle_timeout = 0;
1632 hdev->sniff_max_interval = 800;
1633 hdev->sniff_min_interval = 80;
1634
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001635 INIT_WORK(&hdev->rx_work, hci_rx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001636 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001637 INIT_WORK(&hdev->tx_work, hci_tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001638
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
1640 skb_queue_head_init(&hdev->rx_q);
1641 skb_queue_head_init(&hdev->cmd_q);
1642 skb_queue_head_init(&hdev->raw_q);
1643
Ville Tervo6bd32322011-02-16 16:32:41 +02001644 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1645
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301646 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001647 hdev->reassembly[i] = NULL;
1648
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649 init_waitqueue_head(&hdev->req_wait_q);
Thomas Gleixnera6a67ef2009-07-26 08:18:19 +00001650 mutex_init(&hdev->req_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651
Johan Hedberg30883512012-01-04 14:16:21 +02001652 discovery_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653
1654 hci_conn_hash_init(hdev);
1655
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001656 INIT_LIST_HEAD(&hdev->mgmt_pending);
1657
David Millerea4bd8b2010-07-30 21:54:49 -07001658 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedbergf0358562010-05-18 13:20:32 +02001659
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001660 INIT_LIST_HEAD(&hdev->uuids);
1661
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001662 INIT_LIST_HEAD(&hdev->link_keys);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001663 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001664
Szymon Janc2763eda2011-03-22 13:12:22 +01001665 INIT_LIST_HEAD(&hdev->remote_oob_data);
1666
Andre Guedes76c86862011-05-26 16:23:50 -03001667 INIT_LIST_HEAD(&hdev->adv_entries);
1668
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001669 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001670 INIT_WORK(&hdev->power_on, hci_power_on);
Johan Hedberg32435532011-11-07 22:16:04 +02001671 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001672
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001673 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1676
1677 atomic_set(&hdev->promisc, 0);
1678
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001679 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001681 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1682 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001683 if (!hdev->workqueue) {
1684 error = -ENOMEM;
1685 goto err;
1686 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001687
David Herrmann33ca9542011-10-08 14:58:49 +02001688 error = hci_add_sysfs(hdev);
1689 if (error < 0)
1690 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001692 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1693 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1694 if (hdev->rfkill) {
1695 if (rfkill_register(hdev->rfkill) < 0) {
1696 rfkill_destroy(hdev->rfkill);
1697 hdev->rfkill = NULL;
1698 }
1699 }
1700
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001701 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1702 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001703 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001704
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001706 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
1708 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001709
David Herrmann33ca9542011-10-08 14:58:49 +02001710err_wqueue:
1711 destroy_workqueue(hdev->workqueue);
1712err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001713 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001714 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001715 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001716
David Herrmann33ca9542011-10-08 14:58:49 +02001717 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718}
1719EXPORT_SYMBOL(hci_register_dev);
1720
1721/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02001722void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723{
Marcel Holtmannef222012007-07-11 06:42:04 +02001724 int i;
1725
Marcel Holtmannc13854ce2010-02-08 15:27:07 +01001726 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001728 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001730 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
1732 hci_dev_do_close(hdev);
1733
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301734 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001735 kfree_skb(hdev->reassembly[i]);
1736
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001737 if (!test_bit(HCI_INIT, &hdev->flags) &&
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001738 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001739 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001740 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001741 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001742 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001743
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001744 /* mgmt_index_removed should take care of emptying the
1745 * pending list */
1746 BUG_ON(!list_empty(&hdev->mgmt_pending));
1747
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 hci_notify(hdev, HCI_DEV_UNREG);
1749
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001750 if (hdev->rfkill) {
1751 rfkill_unregister(hdev->rfkill);
1752 rfkill_destroy(hdev->rfkill);
1753 }
1754
David Herrmannce242972011-10-08 14:58:48 +02001755 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08001756
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001757 cancel_delayed_work_sync(&hdev->adv_work);
Gustavo F. Padovanc6f3c5f2011-02-15 20:22:03 -03001758
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001759 destroy_workqueue(hdev->workqueue);
1760
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001761 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001762 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001763 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001764 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001765 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01001766 hci_remote_oob_data_clear(hdev);
Andre Guedes76c86862011-05-26 16:23:50 -03001767 hci_adv_entries_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001768 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001769
David Herrmanndc946bd2012-01-07 15:47:24 +01001770 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771}
1772EXPORT_SYMBOL(hci_unregister_dev);
1773
1774/* Suspend HCI device */
1775int hci_suspend_dev(struct hci_dev *hdev)
1776{
1777 hci_notify(hdev, HCI_DEV_SUSPEND);
1778 return 0;
1779}
1780EXPORT_SYMBOL(hci_suspend_dev);
1781
1782/* Resume HCI device */
1783int hci_resume_dev(struct hci_dev *hdev)
1784{
1785 hci_notify(hdev, HCI_DEV_RESUME);
1786 return 0;
1787}
1788EXPORT_SYMBOL(hci_resume_dev);
1789
Marcel Holtmann76bca882009-11-18 00:40:39 +01001790/* Receive frame from HCI drivers */
1791int hci_recv_frame(struct sk_buff *skb)
1792{
1793 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1794 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1795 && !test_bit(HCI_INIT, &hdev->flags))) {
1796 kfree_skb(skb);
1797 return -ENXIO;
1798 }
1799
1800 /* Incomming skb */
1801 bt_cb(skb)->incoming = 1;
1802
1803 /* Time stamp */
1804 __net_timestamp(skb);
1805
Marcel Holtmann76bca882009-11-18 00:40:39 +01001806 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001807 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001808
Marcel Holtmann76bca882009-11-18 00:40:39 +01001809 return 0;
1810}
1811EXPORT_SYMBOL(hci_recv_frame);
1812
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301813static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001814 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301815{
1816 int len = 0;
1817 int hlen = 0;
1818 int remain = count;
1819 struct sk_buff *skb;
1820 struct bt_skb_cb *scb;
1821
1822 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1823 index >= NUM_REASSEMBLY)
1824 return -EILSEQ;
1825
1826 skb = hdev->reassembly[index];
1827
1828 if (!skb) {
1829 switch (type) {
1830 case HCI_ACLDATA_PKT:
1831 len = HCI_MAX_FRAME_SIZE;
1832 hlen = HCI_ACL_HDR_SIZE;
1833 break;
1834 case HCI_EVENT_PKT:
1835 len = HCI_MAX_EVENT_SIZE;
1836 hlen = HCI_EVENT_HDR_SIZE;
1837 break;
1838 case HCI_SCODATA_PKT:
1839 len = HCI_MAX_SCO_SIZE;
1840 hlen = HCI_SCO_HDR_SIZE;
1841 break;
1842 }
1843
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001844 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301845 if (!skb)
1846 return -ENOMEM;
1847
1848 scb = (void *) skb->cb;
1849 scb->expect = hlen;
1850 scb->pkt_type = type;
1851
1852 skb->dev = (void *) hdev;
1853 hdev->reassembly[index] = skb;
1854 }
1855
1856 while (count) {
1857 scb = (void *) skb->cb;
1858 len = min(scb->expect, (__u16)count);
1859
1860 memcpy(skb_put(skb, len), data, len);
1861
1862 count -= len;
1863 data += len;
1864 scb->expect -= len;
1865 remain = count;
1866
1867 switch (type) {
1868 case HCI_EVENT_PKT:
1869 if (skb->len == HCI_EVENT_HDR_SIZE) {
1870 struct hci_event_hdr *h = hci_event_hdr(skb);
1871 scb->expect = h->plen;
1872
1873 if (skb_tailroom(skb) < scb->expect) {
1874 kfree_skb(skb);
1875 hdev->reassembly[index] = NULL;
1876 return -ENOMEM;
1877 }
1878 }
1879 break;
1880
1881 case HCI_ACLDATA_PKT:
1882 if (skb->len == HCI_ACL_HDR_SIZE) {
1883 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1884 scb->expect = __le16_to_cpu(h->dlen);
1885
1886 if (skb_tailroom(skb) < scb->expect) {
1887 kfree_skb(skb);
1888 hdev->reassembly[index] = NULL;
1889 return -ENOMEM;
1890 }
1891 }
1892 break;
1893
1894 case HCI_SCODATA_PKT:
1895 if (skb->len == HCI_SCO_HDR_SIZE) {
1896 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1897 scb->expect = h->dlen;
1898
1899 if (skb_tailroom(skb) < scb->expect) {
1900 kfree_skb(skb);
1901 hdev->reassembly[index] = NULL;
1902 return -ENOMEM;
1903 }
1904 }
1905 break;
1906 }
1907
1908 if (scb->expect == 0) {
1909 /* Complete frame */
1910
1911 bt_cb(skb)->pkt_type = type;
1912 hci_recv_frame(skb);
1913
1914 hdev->reassembly[index] = NULL;
1915 return remain;
1916 }
1917 }
1918
1919 return remain;
1920}
1921
Marcel Holtmannef222012007-07-11 06:42:04 +02001922int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1923{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301924 int rem = 0;
1925
Marcel Holtmannef222012007-07-11 06:42:04 +02001926 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1927 return -EILSEQ;
1928
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001929 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001930 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301931 if (rem < 0)
1932 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001933
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301934 data += (count - rem);
1935 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001936 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001937
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301938 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001939}
1940EXPORT_SYMBOL(hci_recv_fragment);
1941
Suraj Sumangala99811512010-07-14 13:02:19 +05301942#define STREAM_REASSEMBLY 0
1943
1944int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1945{
1946 int type;
1947 int rem = 0;
1948
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001949 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301950 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1951
1952 if (!skb) {
1953 struct { char type; } *pkt;
1954
1955 /* Start of the frame */
1956 pkt = data;
1957 type = pkt->type;
1958
1959 data++;
1960 count--;
1961 } else
1962 type = bt_cb(skb)->pkt_type;
1963
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001964 rem = hci_reassembly(hdev, type, data, count,
1965 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301966 if (rem < 0)
1967 return rem;
1968
1969 data += (count - rem);
1970 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001971 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301972
1973 return rem;
1974}
1975EXPORT_SYMBOL(hci_recv_stream_fragment);
1976
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977/* ---- Interface to upper protocols ---- */
1978
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979int hci_register_cb(struct hci_cb *cb)
1980{
1981 BT_DBG("%p name %s", cb, cb->name);
1982
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001983 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001985 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 return 0;
1988}
1989EXPORT_SYMBOL(hci_register_cb);
1990
1991int hci_unregister_cb(struct hci_cb *cb)
1992{
1993 BT_DBG("%p name %s", cb, cb->name);
1994
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001995 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001997 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999 return 0;
2000}
2001EXPORT_SYMBOL(hci_unregister_cb);
2002
2003static int hci_send_frame(struct sk_buff *skb)
2004{
2005 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2006
2007 if (!hdev) {
2008 kfree_skb(skb);
2009 return -ENODEV;
2010 }
2011
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002012 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
2014 if (atomic_read(&hdev->promisc)) {
2015 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07002016 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002018 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002019 }
2020
2021 /* Get rid of skb owner, prior to sending to the driver. */
2022 skb_orphan(skb);
2023
2024 return hdev->send(skb);
2025}
2026
2027/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002028int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
2030 int len = HCI_COMMAND_HDR_SIZE + plen;
2031 struct hci_command_hdr *hdr;
2032 struct sk_buff *skb;
2033
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002034 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035
2036 skb = bt_skb_alloc(len, GFP_ATOMIC);
2037 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002038 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 return -ENOMEM;
2040 }
2041
2042 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002043 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 hdr->plen = plen;
2045
2046 if (plen)
2047 memcpy(skb_put(skb, plen), param, plen);
2048
2049 BT_DBG("skb len %d", skb->len);
2050
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002051 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002053
Johan Hedberga5040ef2011-01-10 13:28:59 +02002054 if (test_bit(HCI_INIT, &hdev->flags))
2055 hdev->init_last_cmd = opcode;
2056
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002058 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
2060 return 0;
2061}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
2063/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002064void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065{
2066 struct hci_command_hdr *hdr;
2067
2068 if (!hdev->sent_cmd)
2069 return NULL;
2070
2071 hdr = (void *) hdev->sent_cmd->data;
2072
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002073 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 return NULL;
2075
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002076 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
2078 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2079}
2080
2081/* Send ACL data */
2082static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2083{
2084 struct hci_acl_hdr *hdr;
2085 int len = skb->len;
2086
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002087 skb_push(skb, HCI_ACL_HDR_SIZE);
2088 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002089 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002090 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2091 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092}
2093
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002094static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2095 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096{
2097 struct hci_dev *hdev = conn->hdev;
2098 struct sk_buff *list;
2099
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002100 list = skb_shinfo(skb)->frag_list;
2101 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 /* Non fragmented */
2103 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2104
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002105 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 } else {
2107 /* Fragmented */
2108 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2109
2110 skb_shinfo(skb)->frag_list = NULL;
2111
2112 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002113 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002115 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002116
2117 flags &= ~ACL_START;
2118 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 do {
2120 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002121
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002123 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002124 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2127
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002128 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 } while (list);
2130
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002131 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002133}
2134
2135void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2136{
2137 struct hci_conn *conn = chan->conn;
2138 struct hci_dev *hdev = conn->hdev;
2139
2140 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2141
2142 skb->dev = (void *) hdev;
2143 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2144 hci_add_acl_hdr(skb, conn->handle, flags);
2145
2146 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002148 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149}
2150EXPORT_SYMBOL(hci_send_acl);
2151
2152/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002153void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154{
2155 struct hci_dev *hdev = conn->hdev;
2156 struct hci_sco_hdr hdr;
2157
2158 BT_DBG("%s len %d", hdev->name, skb->len);
2159
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002160 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 hdr.dlen = skb->len;
2162
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002163 skb_push(skb, HCI_SCO_HDR_SIZE);
2164 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002165 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166
2167 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002168 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002169
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002171 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172}
2173EXPORT_SYMBOL(hci_send_sco);
2174
2175/* ---- HCI TX task (outgoing data) ---- */
2176
2177/* HCI Connection scheduler */
2178static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2179{
2180 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002181 struct hci_conn *conn = NULL, *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002184 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002186
2187 rcu_read_lock();
2188
2189 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002190 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002192
2193 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2194 continue;
2195
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 num++;
2197
2198 if (c->sent < min) {
2199 min = c->sent;
2200 conn = c;
2201 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002202
2203 if (hci_conn_num(hdev, type) == num)
2204 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 }
2206
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002207 rcu_read_unlock();
2208
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002210 int cnt, q;
2211
2212 switch (conn->type) {
2213 case ACL_LINK:
2214 cnt = hdev->acl_cnt;
2215 break;
2216 case SCO_LINK:
2217 case ESCO_LINK:
2218 cnt = hdev->sco_cnt;
2219 break;
2220 case LE_LINK:
2221 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2222 break;
2223 default:
2224 cnt = 0;
2225 BT_ERR("Unknown link type");
2226 }
2227
2228 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 *quote = q ? q : 1;
2230 } else
2231 *quote = 0;
2232
2233 BT_DBG("conn %p quote %d", conn, *quote);
2234 return conn;
2235}
2236
Ville Tervobae1f5d2011-02-10 22:38:53 -03002237static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238{
2239 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002240 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241
Ville Tervobae1f5d2011-02-10 22:38:53 -03002242 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002244 rcu_read_lock();
2245
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002247 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d2011-02-10 22:38:53 -03002248 if (c->type == type && c->sent) {
2249 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 hdev->name, batostr(&c->dst));
2251 hci_acl_disconn(c, 0x13);
2252 }
2253 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002254
2255 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256}
2257
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002258static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2259 int *quote)
2260{
2261 struct hci_conn_hash *h = &hdev->conn_hash;
2262 struct hci_chan *chan = NULL;
2263 int num = 0, min = ~0, cur_prio = 0;
2264 struct hci_conn *conn;
2265 int cnt, q, conn_num = 0;
2266
2267 BT_DBG("%s", hdev->name);
2268
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002269 rcu_read_lock();
2270
2271 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002272 struct hci_chan *tmp;
2273
2274 if (conn->type != type)
2275 continue;
2276
2277 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2278 continue;
2279
2280 conn_num++;
2281
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002282 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002283 struct sk_buff *skb;
2284
2285 if (skb_queue_empty(&tmp->data_q))
2286 continue;
2287
2288 skb = skb_peek(&tmp->data_q);
2289 if (skb->priority < cur_prio)
2290 continue;
2291
2292 if (skb->priority > cur_prio) {
2293 num = 0;
2294 min = ~0;
2295 cur_prio = skb->priority;
2296 }
2297
2298 num++;
2299
2300 if (conn->sent < min) {
2301 min = conn->sent;
2302 chan = tmp;
2303 }
2304 }
2305
2306 if (hci_conn_num(hdev, type) == conn_num)
2307 break;
2308 }
2309
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002310 rcu_read_unlock();
2311
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002312 if (!chan)
2313 return NULL;
2314
2315 switch (chan->conn->type) {
2316 case ACL_LINK:
2317 cnt = hdev->acl_cnt;
2318 break;
2319 case SCO_LINK:
2320 case ESCO_LINK:
2321 cnt = hdev->sco_cnt;
2322 break;
2323 case LE_LINK:
2324 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2325 break;
2326 default:
2327 cnt = 0;
2328 BT_ERR("Unknown link type");
2329 }
2330
2331 q = cnt / num;
2332 *quote = q ? q : 1;
2333 BT_DBG("chan %p quote %d", chan, *quote);
2334 return chan;
2335}
2336
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002337static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2338{
2339 struct hci_conn_hash *h = &hdev->conn_hash;
2340 struct hci_conn *conn;
2341 int num = 0;
2342
2343 BT_DBG("%s", hdev->name);
2344
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002345 rcu_read_lock();
2346
2347 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002348 struct hci_chan *chan;
2349
2350 if (conn->type != type)
2351 continue;
2352
2353 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2354 continue;
2355
2356 num++;
2357
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002358 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002359 struct sk_buff *skb;
2360
2361 if (chan->sent) {
2362 chan->sent = 0;
2363 continue;
2364 }
2365
2366 if (skb_queue_empty(&chan->data_q))
2367 continue;
2368
2369 skb = skb_peek(&chan->data_q);
2370 if (skb->priority >= HCI_PRIO_MAX - 1)
2371 continue;
2372
2373 skb->priority = HCI_PRIO_MAX - 1;
2374
2375 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2376 skb->priority);
2377 }
2378
2379 if (hci_conn_num(hdev, type) == num)
2380 break;
2381 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002382
2383 rcu_read_unlock();
2384
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002385}
2386
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387static inline void hci_sched_acl(struct hci_dev *hdev)
2388{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002389 struct hci_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390 struct sk_buff *skb;
2391 int quote;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002392 unsigned int cnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
2394 BT_DBG("%s", hdev->name);
2395
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002396 if (!hci_conn_num(hdev, ACL_LINK))
2397 return;
2398
Linus Torvalds1da177e2005-04-16 15:20:36 -07002399 if (!test_bit(HCI_RAW, &hdev->flags)) {
2400 /* ACL tx timeout must be longer than maximum
2401 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002402 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
2403 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002404 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 }
2406
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002407 cnt = hdev->acl_cnt;
Marcel Holtmann04837f62006-07-03 10:02:33 +02002408
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002409 while (hdev->acl_cnt &&
2410 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002411 u32 priority = (skb_peek(&chan->data_q))->priority;
2412 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002413 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2414 skb->len, skb->priority);
2415
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002416 /* Stop if priority has changed */
2417 if (skb->priority < priority)
2418 break;
2419
2420 skb = skb_dequeue(&chan->data_q);
2421
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002422 hci_conn_enter_active_mode(chan->conn,
2423 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002424
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 hci_send_frame(skb);
2426 hdev->acl_last_tx = jiffies;
2427
2428 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002429 chan->sent++;
2430 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 }
2432 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002433
2434 if (cnt != hdev->acl_cnt)
2435 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436}
2437
2438/* Schedule SCO */
2439static inline void hci_sched_sco(struct hci_dev *hdev)
2440{
2441 struct hci_conn *conn;
2442 struct sk_buff *skb;
2443 int quote;
2444
2445 BT_DBG("%s", hdev->name);
2446
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002447 if (!hci_conn_num(hdev, SCO_LINK))
2448 return;
2449
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2451 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2452 BT_DBG("skb %p len %d", skb, skb->len);
2453 hci_send_frame(skb);
2454
2455 conn->sent++;
2456 if (conn->sent == ~0)
2457 conn->sent = 0;
2458 }
2459 }
2460}
2461
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002462static inline void hci_sched_esco(struct hci_dev *hdev)
2463{
2464 struct hci_conn *conn;
2465 struct sk_buff *skb;
2466 int quote;
2467
2468 BT_DBG("%s", hdev->name);
2469
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002470 if (!hci_conn_num(hdev, ESCO_LINK))
2471 return;
2472
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002473 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2474 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2475 BT_DBG("skb %p len %d", skb, skb->len);
2476 hci_send_frame(skb);
2477
2478 conn->sent++;
2479 if (conn->sent == ~0)
2480 conn->sent = 0;
2481 }
2482 }
2483}
2484
/* Scheduler for LE links: drain per-channel data queues within the
 * controller's available buffer budget.
 *
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) share
 * the ACL pool, so the remaining budget is written back to whichever
 * counter it was taken from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	/* Nothing to do without at least one LE connection */
	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Budget comes from the LE pool, or from the ACL pool when the
	 * controller has no separate LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting budget to detect any activity below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		/* Peek before dequeue: a frame whose priority dropped must
		 * stay queued for a later scheduling round */
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2535
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002536static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002538 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 struct sk_buff *skb;
2540
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002541 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2542 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
2544 /* Schedule queues and send stuff to HCI driver */
2545
2546 hci_sched_acl(hdev);
2547
2548 hci_sched_sco(hdev);
2549
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002550 hci_sched_esco(hdev);
2551
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002552 hci_sched_le(hdev);
2553
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554 /* Send next queued raw (unknown type) packet */
2555 while ((skb = skb_dequeue(&hdev->raw_q)))
2556 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557}
2558
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002559/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560
2561/* ACL data packet */
2562static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2563{
2564 struct hci_acl_hdr *hdr = (void *) skb->data;
2565 struct hci_conn *conn;
2566 __u16 handle, flags;
2567
2568 skb_pull(skb, HCI_ACL_HDR_SIZE);
2569
2570 handle = __le16_to_cpu(hdr->handle);
2571 flags = hci_flags(handle);
2572 handle = hci_handle(handle);
2573
2574 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2575
2576 hdev->stat.acl_rx++;
2577
2578 hci_dev_lock(hdev);
2579 conn = hci_conn_hash_lookup_handle(hdev, handle);
2580 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002583 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002584
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002586 l2cap_recv_acldata(conn, skb, flags);
2587 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002589 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 hdev->name, handle);
2591 }
2592
2593 kfree_skb(skb);
2594}
2595
2596/* SCO data packet */
2597static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2598{
2599 struct hci_sco_hdr *hdr = (void *) skb->data;
2600 struct hci_conn *conn;
2601 __u16 handle;
2602
2603 skb_pull(skb, HCI_SCO_HDR_SIZE);
2604
2605 handle = __le16_to_cpu(hdr->handle);
2606
2607 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2608
2609 hdev->stat.sco_rx++;
2610
2611 hci_dev_lock(hdev);
2612 conn = hci_conn_hash_lookup_handle(hdev, handle);
2613 hci_dev_unlock(hdev);
2614
2615 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002617 sco_recv_scodata(conn, skb);
2618 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002619 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002620 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 hdev->name, handle);
2622 }
2623
2624 kfree_skb(skb);
2625}
2626
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002627static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002629 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 struct sk_buff *skb;
2631
2632 BT_DBG("%s", hdev->name);
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 while ((skb = skb_dequeue(&hdev->rx_q))) {
2635 if (atomic_read(&hdev->promisc)) {
2636 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002637 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 }
2639
2640 if (test_bit(HCI_RAW, &hdev->flags)) {
2641 kfree_skb(skb);
2642 continue;
2643 }
2644
2645 if (test_bit(HCI_INIT, &hdev->flags)) {
2646 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002647 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 case HCI_ACLDATA_PKT:
2649 case HCI_SCODATA_PKT:
2650 kfree_skb(skb);
2651 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002652 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 }
2654
2655 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002656 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002658 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 hci_event_packet(hdev, skb);
2660 break;
2661
2662 case HCI_ACLDATA_PKT:
2663 BT_DBG("%s ACL data packet", hdev->name);
2664 hci_acldata_packet(hdev, skb);
2665 break;
2666
2667 case HCI_SCODATA_PKT:
2668 BT_DBG("%s SCO data packet", hdev->name);
2669 hci_scodata_packet(hdev, skb);
2670 break;
2671
2672 default:
2673 kfree_skb(skb);
2674 break;
2675 }
2676 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677}
2678
/* Command work handler: transmits queued HCI commands to the controller.
 *
 * Commands are flow-controlled: cmd_cnt holds how many command packets
 * the controller will currently accept.  A clone of the command in
 * flight is kept in hdev->sent_cmd so completion events can be matched
 * against it.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference to the previously sent command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone for the status/complete event handlers */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while HCI_RESET is set —
			 * presumably reset may legitimately take longer;
			 * the timer is re-armed by the next command */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (no memory): put the command back at
			 * the head of the queue and retry from the workqueue */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002709
2710int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2711{
2712 /* General inquiry access code (GIAC) */
2713 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2714 struct hci_cp_inquiry cp;
2715
2716 BT_DBG("%s", hdev->name);
2717
2718 if (test_bit(HCI_INQUIRY, &hdev->flags))
2719 return -EINPROGRESS;
2720
Johan Hedberg46632622012-01-02 16:06:08 +02002721 inquiry_cache_flush(hdev);
2722
Andre Guedes2519a1f2011-11-07 11:45:24 -03002723 memset(&cp, 0, sizeof(cp));
2724 memcpy(&cp.lap, lap, sizeof(cp.lap));
2725 cp.length = length;
2726
2727 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2728}
Andre Guedes023d5042011-11-04 14:16:52 -03002729
2730int hci_cancel_inquiry(struct hci_dev *hdev)
2731{
2732 BT_DBG("%s", hdev->name);
2733
2734 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2735 return -EPERM;
2736
2737 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2738}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002739
/* Runtime switch for Bluetooth High Speed (AMP) support, settable at
 * module load time and via sysfs (mode 0644) */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");