/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
/* Delay (ms) before an unconfigured controller is powered back off. */
#define AUTO_OFF_TIMEOUT 2000

/* Module parameter: enable High Speed (AMP) support. */
int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75/* ---- HCI notifications ---- */
76
77int hci_register_notifier(struct notifier_block *nb)
78{
Alan Sterne041c682006-03-27 01:16:30 -080079 return atomic_notifier_chain_register(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070080}
81
82int hci_unregister_notifier(struct notifier_block *nb)
83{
Alan Sterne041c682006-03-27 01:16:30 -080084 return atomic_notifier_chain_unregister(&hci_notifier, nb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085}
86
Marcel Holtmann65164552005-10-28 19:20:48 +020087static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070088{
Alan Sterne041c682006-03-27 01:16:30 -080089 atomic_notifier_call_chain(&hci_notifier, event, hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070090}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900123static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100124 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700125{
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700146 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700156 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700157
Johan Hedberga5040ef2011-01-10 13:28:59 +0200158 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
181static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182{
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300186 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200190static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200192 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800193 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200194 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Mandatory initialization */
199
200 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300204 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200209 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
224 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200230 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700234 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240}
241
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200242static void amp_init(struct hci_dev *hdev)
243{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300287static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288{
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200358
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200359bool hci_discovery_active(struct hci_dev *hdev)
360{
361 struct discovery_state *discov = &hdev->discovery;
362
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_RESOLVING)
365 return true;
366
367 return false;
368}
369
Johan Hedbergff9ef572012-01-04 14:23:45 +0200370void hci_discovery_set_state(struct hci_dev *hdev, int state)
371{
372 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
373
374 if (hdev->discovery.state == state)
375 return;
376
377 switch (state) {
378 case DISCOVERY_STOPPED:
379 mgmt_discovering(hdev, 0);
380 break;
381 case DISCOVERY_STARTING:
382 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200383 case DISCOVERY_INQUIRY:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200384 mgmt_discovering(hdev, 1);
385 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200386 case DISCOVERY_RESOLVING:
387 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200388 case DISCOVERY_STOPPING:
389 break;
390 }
391
392 hdev->discovery.state = state;
393}
394
Linus Torvalds1da177e2005-04-16 15:20:36 -0700395static void inquiry_cache_flush(struct hci_dev *hdev)
396{
Johan Hedberg30883512012-01-04 14:16:21 +0200397 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200398 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700399
Johan Hedberg561aafb2012-01-04 13:31:59 +0200400 list_for_each_entry_safe(p, n, &cache->all, all) {
401 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200402 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200404
405 INIT_LIST_HEAD(&cache->unknown);
406 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200407 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408}
409
410struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
411{
Johan Hedberg30883512012-01-04 14:16:21 +0200412 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 struct inquiry_entry *e;
414
415 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
416
Johan Hedberg561aafb2012-01-04 13:31:59 +0200417 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200419 return e;
420 }
421
422 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423}
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
426 bdaddr_t *bdaddr)
427{
Johan Hedberg30883512012-01-04 14:16:21 +0200428 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200429 struct inquiry_entry *e;
430
431 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
432
433 list_for_each_entry(e, &cache->unknown, list) {
434 if (!bacmp(&e->data.bdaddr, bdaddr))
435 return e;
436 }
437
438 return NULL;
439}
440
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200441struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
442 bdaddr_t *bdaddr,
443 int state)
444{
445 struct discovery_state *cache = &hdev->discovery;
446 struct inquiry_entry *e;
447
448 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
449
450 list_for_each_entry(e, &cache->resolve, list) {
451 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
452 return e;
453 if (!bacmp(&e->data.bdaddr, bdaddr))
454 return e;
455 }
456
457 return NULL;
458}
459
Johan Hedberg31754052012-01-04 13:39:52 +0200460bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg561aafb2012-01-04 13:31:59 +0200461 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462{
Johan Hedberg30883512012-01-04 14:16:21 +0200463 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200464 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465
466 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
467
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200468 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200469 if (ie)
470 goto update;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200471
Johan Hedberg561aafb2012-01-04 13:31:59 +0200472 /* Entry not in the cache. Add new one. */
473 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
474 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200475 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200476
477 list_add(&ie->all, &cache->all);
478
479 if (name_known) {
480 ie->name_state = NAME_KNOWN;
481 } else {
482 ie->name_state = NAME_NOT_KNOWN;
483 list_add(&ie->list, &cache->unknown);
484 }
485
486update:
487 if (name_known && ie->name_state != NAME_KNOWN &&
488 ie->name_state != NAME_PENDING) {
489 ie->name_state = NAME_KNOWN;
490 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 }
492
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200493 memcpy(&ie->data, data, sizeof(*data));
494 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200496
497 if (ie->name_state == NAME_NOT_KNOWN)
498 return false;
499
500 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501}
502
503static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
504{
Johan Hedberg30883512012-01-04 14:16:21 +0200505 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700506 struct inquiry_info *info = (struct inquiry_info *) buf;
507 struct inquiry_entry *e;
508 int copied = 0;
509
Johan Hedberg561aafb2012-01-04 13:31:59 +0200510 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700511 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200512
513 if (copied >= num)
514 break;
515
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 bacpy(&info->bdaddr, &data->bdaddr);
517 info->pscan_rep_mode = data->pscan_rep_mode;
518 info->pscan_period_mode = data->pscan_period_mode;
519 info->pscan_mode = data->pscan_mode;
520 memcpy(info->dev_class, data->dev_class, 3);
521 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200522
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200524 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525 }
526
527 BT_DBG("cache %p, copied %d", cache, copied);
528 return copied;
529}
530
531static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
532{
533 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
534 struct hci_cp_inquiry cp;
535
536 BT_DBG("%s", hdev->name);
537
538 if (test_bit(HCI_INQUIRY, &hdev->flags))
539 return;
540
541 /* Start Inquiry */
542 memcpy(&cp.lap, &ir->lap, 3);
543 cp.length = ir->length;
544 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200545 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546}
547
548int hci_inquiry(void __user *arg)
549{
550 __u8 __user *ptr = arg;
551 struct hci_inquiry_req ir;
552 struct hci_dev *hdev;
553 int err = 0, do_inquiry = 0, max_rsp;
554 long timeo;
555 __u8 *buf;
556
557 if (copy_from_user(&ir, ptr, sizeof(ir)))
558 return -EFAULT;
559
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200560 hdev = hci_dev_get(ir.dev_id);
561 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 return -ENODEV;
563
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300564 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900565 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200566 inquiry_cache_empty(hdev) ||
567 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 inquiry_cache_flush(hdev);
569 do_inquiry = 1;
570 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300571 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572
Marcel Holtmann04837f62006-07-03 10:02:33 +0200573 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200574
575 if (do_inquiry) {
576 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
577 if (err < 0)
578 goto done;
579 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700580
581 /* for unlimited number of responses we will use buffer with 255 entries */
582 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
583
584 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
585 * copy it to the user space.
586 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100587 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200588 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700589 err = -ENOMEM;
590 goto done;
591 }
592
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300593 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300595 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596
597 BT_DBG("num_rsp %d", ir.num_rsp);
598
599 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
600 ptr += sizeof(ir);
601 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
602 ir.num_rsp))
603 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900604 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700605 err = -EFAULT;
606
607 kfree(buf);
608
609done:
610 hci_dev_put(hdev);
611 return err;
612}
613
614/* ---- HCI ioctl helpers ---- */
615
616int hci_dev_open(__u16 dev)
617{
618 struct hci_dev *hdev;
619 int ret = 0;
620
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200621 hdev = hci_dev_get(dev);
622 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700623 return -ENODEV;
624
625 BT_DBG("%s %p", hdev->name, hdev);
626
627 hci_req_lock(hdev);
628
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200629 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
630 ret = -ERFKILL;
631 goto done;
632 }
633
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 if (test_bit(HCI_UP, &hdev->flags)) {
635 ret = -EALREADY;
636 goto done;
637 }
638
639 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
640 set_bit(HCI_RAW, &hdev->flags);
641
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200642 /* Treat all non BR/EDR controllers as raw devices if
643 enable_hs is not set */
644 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100645 set_bit(HCI_RAW, &hdev->flags);
646
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 if (hdev->open(hdev)) {
648 ret = -EIO;
649 goto done;
650 }
651
652 if (!test_bit(HCI_RAW, &hdev->flags)) {
653 atomic_set(&hdev->cmd_cnt, 1);
654 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200655 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
Marcel Holtmann04837f62006-07-03 10:02:33 +0200657 ret = __hci_request(hdev, hci_init_req, 0,
658 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700659
Andre Guedeseead27d2011-06-30 19:20:55 -0300660 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300661 ret = __hci_request(hdev, hci_le_init_req, 0,
662 msecs_to_jiffies(HCI_INIT_TIMEOUT));
663
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 clear_bit(HCI_INIT, &hdev->flags);
665 }
666
667 if (!ret) {
668 hci_dev_hold(hdev);
669 set_bit(HCI_UP, &hdev->flags);
670 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200671 if (!test_bit(HCI_SETUP, &hdev->flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300672 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200673 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300674 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200675 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900676 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700677 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200678 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200679 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400680 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681
682 skb_queue_purge(&hdev->cmd_q);
683 skb_queue_purge(&hdev->rx_q);
684
685 if (hdev->flush)
686 hdev->flush(hdev);
687
688 if (hdev->sent_cmd) {
689 kfree_skb(hdev->sent_cmd);
690 hdev->sent_cmd = NULL;
691 }
692
693 hdev->close(hdev);
694 hdev->flags = 0;
695 }
696
697done:
698 hci_req_unlock(hdev);
699 hci_dev_put(hdev);
700 return ret;
701}
702
703static int hci_dev_do_close(struct hci_dev *hdev)
704{
705 BT_DBG("%s %p", hdev->name, hdev);
706
707 hci_req_cancel(hdev, ENODEV);
708 hci_req_lock(hdev);
709
710 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300711 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 hci_req_unlock(hdev);
713 return 0;
714 }
715
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200716 /* Flush RX and TX works */
717 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400718 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200720 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200721 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200722 hdev->discov_timeout = 0;
723 }
724
Johan Hedberg32435532011-11-07 22:16:04 +0200725 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200726 cancel_delayed_work(&hdev->power_off);
Johan Hedberg32435532011-11-07 22:16:04 +0200727
Johan Hedberg7d785252011-12-15 00:47:39 +0200728 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
729 cancel_delayed_work(&hdev->service_cache);
730
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300731 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700732 inquiry_cache_flush(hdev);
733 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300734 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700735
736 hci_notify(hdev, HCI_DEV_DOWN);
737
738 if (hdev->flush)
739 hdev->flush(hdev);
740
741 /* Reset device */
742 skb_queue_purge(&hdev->cmd_q);
743 atomic_set(&hdev->cmd_cnt, 1);
744 if (!test_bit(HCI_RAW, &hdev->flags)) {
745 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200746 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200747 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700748 clear_bit(HCI_INIT, &hdev->flags);
749 }
750
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200751 /* flush cmd work */
752 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700753
754 /* Drop queues */
755 skb_queue_purge(&hdev->rx_q);
756 skb_queue_purge(&hdev->cmd_q);
757 skb_queue_purge(&hdev->raw_q);
758
759 /* Drop last sent command */
760 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300761 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 kfree_skb(hdev->sent_cmd);
763 hdev->sent_cmd = NULL;
764 }
765
766 /* After this point our queues are empty
767 * and no tasks are scheduled. */
768 hdev->close(hdev);
769
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300770 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200771 mgmt_powered(hdev, 0);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300772 hci_dev_unlock(hdev);
Johan Hedberg5add6af2010-12-16 10:00:37 +0200773
Linus Torvalds1da177e2005-04-16 15:20:36 -0700774 /* Clear flags */
775 hdev->flags = 0;
776
777 hci_req_unlock(hdev);
778
779 hci_dev_put(hdev);
780 return 0;
781}
782
783int hci_dev_close(__u16 dev)
784{
785 struct hci_dev *hdev;
786 int err;
787
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200788 hdev = hci_dev_get(dev);
789 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790 return -ENODEV;
791 err = hci_dev_do_close(hdev);
792 hci_dev_put(hdev);
793 return err;
794}
795
/* Reset the HCI device identified by @dev: drop pending traffic, flush
 * the inquiry cache and connection hash and, unless the device runs in
 * raw mode, issue an HCI Reset command to the controller.
 * Returns 0 on success or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush its own queues, if it provides a hook */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-arm the command quota and forget in-flight packet counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
834
835int hci_dev_reset_stat(__u16 dev)
836{
837 struct hci_dev *hdev;
838 int ret = 0;
839
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200840 hdev = hci_dev_get(dev);
841 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842 return -ENODEV;
843
844 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
845
846 hci_dev_put(hdev);
847
848 return ret;
849}
850
851int hci_dev_cmd(unsigned int cmd, void __user *arg)
852{
853 struct hci_dev *hdev;
854 struct hci_dev_req dr;
855 int err = 0;
856
857 if (copy_from_user(&dr, arg, sizeof(dr)))
858 return -EFAULT;
859
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200860 hdev = hci_dev_get(dr.dev_id);
861 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 return -ENODEV;
863
864 switch (cmd) {
865 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200866 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
867 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868 break;
869
870 case HCISETENCRYPT:
871 if (!lmp_encrypt_capable(hdev)) {
872 err = -EOPNOTSUPP;
873 break;
874 }
875
876 if (!test_bit(HCI_AUTH, &hdev->flags)) {
877 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200878 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
879 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880 if (err)
881 break;
882 }
883
Marcel Holtmann04837f62006-07-03 10:02:33 +0200884 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
885 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886 break;
887
888 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200889 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
890 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 break;
892
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200893 case HCISETLINKPOL:
894 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
895 msecs_to_jiffies(HCI_INIT_TIMEOUT));
896 break;
897
898 case HCISETLINKMODE:
899 hdev->link_mode = ((__u16) dr.dev_opt) &
900 (HCI_LM_MASTER | HCI_LM_ACCEPT);
901 break;
902
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 case HCISETPTYPE:
904 hdev->pkt_type = (__u16) dr.dev_opt;
905 break;
906
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200908 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
909 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700910 break;
911
912 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200913 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
914 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700915 break;
916
917 default:
918 err = -EINVAL;
919 break;
920 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200921
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922 hci_dev_put(hdev);
923 return err;
924}
925
926int hci_get_dev_list(void __user *arg)
927{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200928 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 struct hci_dev_list_req *dl;
930 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700931 int n = 0, size, err;
932 __u16 dev_num;
933
934 if (get_user(dev_num, (__u16 __user *) arg))
935 return -EFAULT;
936
937 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
938 return -EINVAL;
939
940 size = sizeof(*dl) + dev_num * sizeof(*dr);
941
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200942 dl = kzalloc(size, GFP_KERNEL);
943 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700944 return -ENOMEM;
945
946 dr = dl->dev_req;
947
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200948 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200949 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberg32435532011-11-07 22:16:04 +0200950 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200951 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +0200952
953 if (!test_bit(HCI_MGMT, &hdev->flags))
954 set_bit(HCI_PAIRABLE, &hdev->flags);
955
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 (dr + n)->dev_id = hdev->id;
957 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200958
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 if (++n >= dev_num)
960 break;
961 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200962 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963
964 dl->dev_num = n;
965 size = sizeof(*dl) + n * sizeof(*dr);
966
967 err = copy_to_user(arg, dl, size);
968 kfree(dl);
969
970 return err ? -EFAULT : 0;
971}
972
973int hci_get_dev_info(void __user *arg)
974{
975 struct hci_dev *hdev;
976 struct hci_dev_info di;
977 int err = 0;
978
979 if (copy_from_user(&di, arg, sizeof(di)))
980 return -EFAULT;
981
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200982 hdev = hci_dev_get(di.dev_id);
983 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 return -ENODEV;
985
Johan Hedberg32435532011-11-07 22:16:04 +0200986 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
987 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200988
Johan Hedbergc542a062011-01-26 13:11:03 +0200989 if (!test_bit(HCI_MGMT, &hdev->flags))
990 set_bit(HCI_PAIRABLE, &hdev->flags);
991
Linus Torvalds1da177e2005-04-16 15:20:36 -0700992 strcpy(di.name, hdev->name);
993 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +0100994 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700995 di.flags = hdev->flags;
996 di.pkt_type = hdev->pkt_type;
997 di.acl_mtu = hdev->acl_mtu;
998 di.acl_pkts = hdev->acl_pkts;
999 di.sco_mtu = hdev->sco_mtu;
1000 di.sco_pkts = hdev->sco_pkts;
1001 di.link_policy = hdev->link_policy;
1002 di.link_mode = hdev->link_mode;
1003
1004 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1005 memcpy(&di.features, &hdev->features, sizeof(di.features));
1006
1007 if (copy_to_user(arg, &di, sizeof(di)))
1008 err = -EFAULT;
1009
1010 hci_dev_put(hdev);
1011
1012 return err;
1013}
1014
1015/* ---- Interface to HCI drivers ---- */
1016
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001017static int hci_rfkill_set_block(void *data, bool blocked)
1018{
1019 struct hci_dev *hdev = data;
1020
1021 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1022
1023 if (!blocked)
1024 return 0;
1025
1026 hci_dev_do_close(hdev);
1027
1028 return 0;
1029}
1030
/* rfkill integration: only the block operation needs special handling */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1034
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035/* Alloc HCI device */
1036struct hci_dev *hci_alloc_dev(void)
1037{
1038 struct hci_dev *hdev;
1039
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001040 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 if (!hdev)
1042 return NULL;
1043
David Herrmann0ac7e702011-10-08 14:58:47 +02001044 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 skb_queue_head_init(&hdev->driver_init);
1046
1047 return hdev;
1048}
1049EXPORT_SYMBOL(hci_alloc_dev);
1050
/* Free HCI device allocated with hci_alloc_dev() */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1060
/* Deferred power-on work: open the device and, when it was powered on
 * automatically at registration, schedule an automatic power-off after
 * a grace period unless user space claims it first. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes controller setup */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
1077
/* Deferred auto power-off work, scheduled from hci_power_on() */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The power-off is happening now; the auto-off marker is obsolete */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1089
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001090static void hci_discov_off(struct work_struct *work)
1091{
1092 struct hci_dev *hdev;
1093 u8 scan = SCAN_PAGE;
1094
1095 hdev = container_of(work, struct hci_dev, discov_off.work);
1096
1097 BT_DBG("%s", hdev->name);
1098
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001099 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001100
1101 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1102
1103 hdev->discov_timeout = 0;
1104
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001105 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001106}
1107
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001108int hci_uuids_clear(struct hci_dev *hdev)
1109{
1110 struct list_head *p, *n;
1111
1112 list_for_each_safe(p, n, &hdev->uuids) {
1113 struct bt_uuid *uuid;
1114
1115 uuid = list_entry(p, struct bt_uuid, list);
1116
1117 list_del(p);
1118 kfree(uuid);
1119 }
1120
1121 return 0;
1122}
1123
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001124int hci_link_keys_clear(struct hci_dev *hdev)
1125{
1126 struct list_head *p, *n;
1127
1128 list_for_each_safe(p, n, &hdev->link_keys) {
1129 struct link_key *key;
1130
1131 key = list_entry(p, struct link_key, list);
1132
1133 list_del(p);
1134 kfree(key);
1135 }
1136
1137 return 0;
1138}
1139
1140struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1141{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001142 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001143
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001144 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001145 if (bacmp(bdaddr, &k->bdaddr) == 0)
1146 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001147
1148 return NULL;
1149}
1150
/* Decide whether a link key may be stored persistently.
 * Returns 1 when the key can survive across power cycles, 0 when it
 * must be discarded once the current connection is gone. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case (no connection context available) */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1186
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001187struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1188{
1189 struct link_key *k;
1190
1191 list_for_each_entry(k, &hdev->link_keys, list) {
1192 struct key_master_id *id;
1193
1194 if (k->type != HCI_LK_SMP_LTK)
1195 continue;
1196
1197 if (k->dlen != sizeof(*id))
1198 continue;
1199
1200 id = (void *) &k->data;
1201 if (id->ediv == ediv &&
1202 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1203 return k;
1204 }
1205
1206 return NULL;
1207}
1208EXPORT_SYMBOL(hci_find_ltk);
1209
1210struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1211 bdaddr_t *bdaddr, u8 type)
1212{
1213 struct link_key *k;
1214
1215 list_for_each_entry(k, &hdev->link_keys, list)
1216 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1217 return k;
1218
1219 return NULL;
1220}
1221EXPORT_SYMBOL(hci_find_link_key_type);
1222
/* Store (or update) the link key for @bdaddr.
 * @conn may be NULL (security mode 3); @new_key says whether the
 * controller just produced the key, in which case the management
 * interface is notified and non-persistent keys are dropped again.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key inherits the type it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are handed to user space but not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1277
/* Store an SMP Long Term Key for @bdaddr as a link key of type
 * HCI_LK_SMP_LTK, keeping the master id (@ediv / @rand) in the entry's
 * trailing data area. Notifies the management interface when @new_key
 * is set. Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if there is one */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Extra room for the master id stored behind the key */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* The pin_len field doubles as the encryption key size for LTKs */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1315
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001316int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1317{
1318 struct link_key *key;
1319
1320 key = hci_find_link_key(hdev, bdaddr);
1321 if (!key)
1322 return -ENOENT;
1323
1324 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1325
1326 list_del(&key->list);
1327 kfree(key);
1328
1329 return 0;
1330}
1331
Ville Tervo6bd32322011-02-16 16:32:41 +02001332/* HCI command timer function */
1333static void hci_cmd_timer(unsigned long arg)
1334{
1335 struct hci_dev *hdev = (void *) arg;
1336
1337 BT_ERR("%s command tx timeout", hdev->name);
1338 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001339 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001340}
1341
Szymon Janc2763eda2011-03-22 13:12:22 +01001342struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1343 bdaddr_t *bdaddr)
1344{
1345 struct oob_data *data;
1346
1347 list_for_each_entry(data, &hdev->remote_oob_data, list)
1348 if (bacmp(bdaddr, &data->bdaddr) == 0)
1349 return data;
1350
1351 return NULL;
1352}
1353
1354int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355{
1356 struct oob_data *data;
1357
1358 data = hci_find_remote_oob_data(hdev, bdaddr);
1359 if (!data)
1360 return -ENOENT;
1361
1362 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363
1364 list_del(&data->list);
1365 kfree(data);
1366
1367 return 0;
1368}
1369
1370int hci_remote_oob_data_clear(struct hci_dev *hdev)
1371{
1372 struct oob_data *data, *n;
1373
1374 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1375 list_del(&data->list);
1376 kfree(data);
1377 }
1378
1379 return 0;
1380}
1381
1382int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1383 u8 *randomizer)
1384{
1385 struct oob_data *data;
1386
1387 data = hci_find_remote_oob_data(hdev, bdaddr);
1388
1389 if (!data) {
1390 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1391 if (!data)
1392 return -ENOMEM;
1393
1394 bacpy(&data->bdaddr, bdaddr);
1395 list_add(&data->list, &hdev->remote_oob_data);
1396 }
1397
1398 memcpy(data->hash, hash, sizeof(data->hash));
1399 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1400
1401 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1402
1403 return 0;
1404}
1405
Antti Julkub2a66aa2011-06-15 12:01:14 +03001406struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1407 bdaddr_t *bdaddr)
1408{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001409 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001410
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001411 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001412 if (bacmp(bdaddr, &b->bdaddr) == 0)
1413 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001414
1415 return NULL;
1416}
1417
1418int hci_blacklist_clear(struct hci_dev *hdev)
1419{
1420 struct list_head *p, *n;
1421
1422 list_for_each_safe(p, n, &hdev->blacklist) {
1423 struct bdaddr_list *b;
1424
1425 b = list_entry(p, struct bdaddr_list, list);
1426
1427 list_del(p);
1428 kfree(b);
1429 }
1430
1431 return 0;
1432}
1433
1434int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1435{
1436 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001437
1438 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1439 return -EBADF;
1440
Antti Julku5e762442011-08-25 16:48:02 +03001441 if (hci_blacklist_lookup(hdev, bdaddr))
1442 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001443
1444 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001445 if (!entry)
1446 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001447
1448 bacpy(&entry->bdaddr, bdaddr);
1449
1450 list_add(&entry->list, &hdev->blacklist);
1451
Johan Hedberg744cf192011-11-08 20:40:14 +02001452 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001453}
1454
1455int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1456{
1457 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001458
Szymon Janc1ec918c2011-11-16 09:32:21 +01001459 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001460 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001461
1462 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001463 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001464 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001465
1466 list_del(&entry->list);
1467 kfree(entry);
1468
Johan Hedberg744cf192011-11-08 20:40:14 +02001469 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001470}
1471
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001472static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001473{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001474 struct hci_dev *hdev = container_of(work, struct hci_dev,
1475 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001476
1477 hci_dev_lock(hdev);
1478
1479 hci_adv_entries_clear(hdev);
1480
1481 hci_dev_unlock(hdev);
1482}
1483
Andre Guedes76c86862011-05-26 16:23:50 -03001484int hci_adv_entries_clear(struct hci_dev *hdev)
1485{
1486 struct adv_entry *entry, *tmp;
1487
1488 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1489 list_del(&entry->list);
1490 kfree(entry);
1491 }
1492
1493 BT_DBG("%s adv cache cleared", hdev->name);
1494
1495 return 0;
1496}
1497
1498struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1499{
1500 struct adv_entry *entry;
1501
1502 list_for_each_entry(entry, &hdev->adv_entries, list)
1503 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1504 return entry;
1505
1506 return NULL;
1507}
1508
1509static inline int is_connectable_adv(u8 evt_type)
1510{
1511 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1512 return 1;
1513
1514 return 0;
1515}
1516
1517int hci_add_adv_entry(struct hci_dev *hdev,
1518 struct hci_ev_le_advertising_info *ev)
1519{
1520 struct adv_entry *entry;
1521
1522 if (!is_connectable_adv(ev->evt_type))
1523 return -EINVAL;
1524
1525 /* Only new entries should be added to adv_entries. So, if
1526 * bdaddr was found, don't add it. */
1527 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1528 return 0;
1529
1530 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1531 if (!entry)
1532 return -ENOMEM;
1533
1534 bacpy(&entry->bdaddr, &ev->bdaddr);
1535 entry->bdaddr_type = ev->bdaddr_type;
1536
1537 list_add(&entry->list, &hdev->adv_entries);
1538
1539 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1540 batostr(&entry->bdaddr), entry->bdaddr_type);
1541
1542 return 0;
1543}
1544
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must provide the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert behind @head so the list stays sorted by id */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	/* Default state and link parameters */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Per-device workers for RX, command and TX processing */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	/* Deferred work: adv-cache expiry, power management, discoverable
	 * timeout */
	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal; the device simply has no kill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in auto-off setup state and are powered on
	 * asynchronously */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Roll back the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1672
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global list first so no new lookups find us */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal if the device ever finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Stop deferred work before tearing down its data */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1724
1725/* Suspend HCI device */
1726int hci_suspend_dev(struct hci_dev *hdev)
1727{
1728 hci_notify(hdev, HCI_DEV_SUSPEND);
1729 return 0;
1730}
1731EXPORT_SYMBOL(hci_suspend_dev);
1732
1733/* Resume HCI device */
1734int hci_resume_dev(struct hci_dev *hdev)
1735{
1736 hci_notify(hdev, HCI_DEV_RESUME);
1737 return 0;
1738}
1739EXPORT_SYMBOL(hci_resume_dev);
1740
Marcel Holtmann76bca882009-11-18 00:40:39 +01001741/* Receive frame from HCI drivers */
1742int hci_recv_frame(struct sk_buff *skb)
1743{
1744 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1745 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1746 && !test_bit(HCI_INIT, &hdev->flags))) {
1747 kfree_skb(skb);
1748 return -ENXIO;
1749 }
1750
1751 /* Incomming skb */
1752 bt_cb(skb)->incoming = 1;
1753
1754 /* Time stamp */
1755 __net_timestamp(skb);
1756
Marcel Holtmann76bca882009-11-18 00:40:39 +01001757 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001758 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001759
Marcel Holtmann76bca882009-11-18 00:40:39 +01001760 return 0;
1761}
1762EXPORT_SYMBOL(hci_recv_frame);
1763
/* Reassemble a (possibly fragmented) HCI packet.
 *
 * @hdev:  device the bytes arrived on
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: slot in hdev->reassembly[] holding the in-progress frame
 *
 * Returns the number of bytes left unconsumed in @data (>= 0), or a
 * negative errno: -EILSEQ for an invalid type/index, -ENOMEM when the
 * skb cannot be allocated or a header advertises more payload than
 * the skb has room for.  A completed frame is handed off to
 * hci_recv_frame() and the slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate an skb sized for the
		 * largest possible packet of this type and arrange to
		 * read just the header first (scb->expect = hlen). */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected for
		 * the current header/payload section. */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has accumulated, learn the payload
		 * length from it and sanity-check it against tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1872
Marcel Holtmannef222012007-07-11 06:42:04 +02001873int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1874{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301875 int rem = 0;
1876
Marcel Holtmannef222012007-07-11 06:42:04 +02001877 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1878 return -EILSEQ;
1879
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001880 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001881 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301882 if (rem < 0)
1883 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001884
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301885 data += (count - rem);
1886 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001887 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001888
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301889 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001890}
1891EXPORT_SYMBOL(hci_recv_fragment);
1892
Suraj Sumangala99811512010-07-14 13:02:19 +05301893#define STREAM_REASSEMBLY 0
1894
1895int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1896{
1897 int type;
1898 int rem = 0;
1899
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001900 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301901 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1902
1903 if (!skb) {
1904 struct { char type; } *pkt;
1905
1906 /* Start of the frame */
1907 pkt = data;
1908 type = pkt->type;
1909
1910 data++;
1911 count--;
1912 } else
1913 type = bt_cb(skb)->pkt_type;
1914
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001915 rem = hci_reassembly(hdev, type, data, count,
1916 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301917 if (rem < 0)
1918 return rem;
1919
1920 data += (count - rem);
1921 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001922 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301923
1924 return rem;
1925}
1926EXPORT_SYMBOL(hci_recv_stream_fragment);
1927
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928/* ---- Interface to upper protocols ---- */
1929
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930int hci_register_cb(struct hci_cb *cb)
1931{
1932 BT_DBG("%p name %s", cb, cb->name);
1933
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001934 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001936 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001937
1938 return 0;
1939}
1940EXPORT_SYMBOL(hci_register_cb);
1941
1942int hci_unregister_cb(struct hci_cb *cb)
1943{
1944 BT_DBG("%p name %s", cb, cb->name);
1945
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001946 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001948 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 return 0;
1951}
1952EXPORT_SYMBOL(hci_unregister_cb);
1953
1954static int hci_send_frame(struct sk_buff *skb)
1955{
1956 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1957
1958 if (!hdev) {
1959 kfree_skb(skb);
1960 return -ENODEV;
1961 }
1962
Marcel Holtmann0d48d932005-08-09 20:30:28 -07001963 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
1965 if (atomic_read(&hdev->promisc)) {
1966 /* Time stamp */
Patrick McHardya61bbcf2005-08-14 17:24:31 -07001967 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02001969 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 }
1971
1972 /* Get rid of skb owner, prior to sending to the driver. */
1973 skb_orphan(skb);
1974
1975 return hdev->send(skb);
1976}
1977
1978/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001979int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
1981 int len = HCI_COMMAND_HDR_SIZE + plen;
1982 struct hci_command_hdr *hdr;
1983 struct sk_buff *skb;
1984
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001985 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 skb = bt_skb_alloc(len, GFP_ATOMIC);
1988 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02001989 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 return -ENOMEM;
1991 }
1992
1993 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001994 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 hdr->plen = plen;
1996
1997 if (plen)
1998 memcpy(skb_put(skb, plen), param, plen);
1999
2000 BT_DBG("skb len %d", skb->len);
2001
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002002 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002004
Johan Hedberga5040ef2011-01-10 13:28:59 +02002005 if (test_bit(HCI_INIT, &hdev->flags))
2006 hdev->init_last_cmd = opcode;
2007
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002009 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010
2011 return 0;
2012}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
2014/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002015void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016{
2017 struct hci_command_hdr *hdr;
2018
2019 if (!hdev->sent_cmd)
2020 return NULL;
2021
2022 hdr = (void *) hdev->sent_cmd->data;
2023
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002024 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 return NULL;
2026
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002027 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2030}
2031
2032/* Send ACL data */
2033static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2034{
2035 struct hci_acl_hdr *hdr;
2036 int len = skb->len;
2037
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002038 skb_push(skb, HCI_ACL_HDR_SIZE);
2039 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002040 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002041 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2042 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043}
2044
/* Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue.  The caller has already added the ACL header to the first
 * skb; continuation fragments get their own headers here, with
 * ACL_START replaced by ACL_CONT.  Fragments are queued atomically
 * under the queue's own lock so the TX path never sees a partial
 * frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2085
2086void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2087{
2088 struct hci_conn *conn = chan->conn;
2089 struct hci_dev *hdev = conn->hdev;
2090
2091 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2092
2093 skb->dev = (void *) hdev;
2094 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2095 hci_add_acl_hdr(skb, conn->handle, flags);
2096
2097 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002099 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100}
2101EXPORT_SYMBOL(hci_send_acl);
2102
2103/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002104void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105{
2106 struct hci_dev *hdev = conn->hdev;
2107 struct hci_sco_hdr hdr;
2108
2109 BT_DBG("%s len %d", hdev->name, skb->len);
2110
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002111 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 hdr.dlen = skb->len;
2113
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002114 skb_push(skb, HCI_SCO_HDR_SIZE);
2115 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002116 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002119 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002122 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123}
2124EXPORT_SYMBOL(hci_send_sco);
2125
2126/* ---- HCI TX task (outgoing data) ---- */
2127
/* HCI Connection scheduler.
 *
 * Pick the connection of @type with queued data and the fewest
 * outstanding packets (c->sent), and compute its fair share of the
 * controller's buffer credits in *@quote (at least 1 if a connection
 * was found, 0 otherwise).  Returns the chosen connection or NULL.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong-type connections and those with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available credits depend on the link type; LE falls
		 * back to the ACL pool when no LE buffers exist. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2187
Ville Tervobae1f5d2011-02-10 22:38:53 -03002188static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189{
2190 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002191 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192
Ville Tervobae1f5d2011-02-10 22:38:53 -03002193 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002195 rcu_read_lock();
2196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002198 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d2011-02-10 22:38:53 -03002199 if (c->type == type && c->sent) {
2200 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 hdev->name, batostr(&c->dst));
2202 hci_acl_disconn(c, 0x13);
2203 }
2204 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002205
2206 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207}
2208
/* Channel-level scheduler.
 *
 * Among all channels of connections of @type, consider only those
 * whose head-of-queue skb has the highest priority seen so far, and
 * among them pick the one whose connection has the fewest outstanding
 * packets.  *@quote receives that connection's share of the buffer
 * credits (at least 1).  Returns the chosen channel or NULL.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Ignore channels below the best priority so far */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority resets the search */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Ties broken by least-busy connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credits come from the pool matching the link type; LE uses
	 * the ACL pool when the controller has no LE buffers. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2287
/* Anti-starvation pass for channels of @type.
 *
 * Channels that sent something in the last round merely get their
 * per-round counter reset; channels that were skipped but still have
 * queued data get their head skb promoted to HCI_PRIO_MAX - 1 so they
 * win the next hci_chan_sent() round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) promotion priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2337
/* Drain queued ACL data, fairly across channels.
 *
 * Repeatedly asks hci_chan_sent() for the best channel and sends up to
 * its quota of packets, stopping early if a lower-priority skb appears
 * at the head of the queue.  If any credits were consumed, runs the
 * anti-starvation recalculation.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the credit count to detect whether anything was sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2387
2388/* Schedule SCO */
2389static inline void hci_sched_sco(struct hci_dev *hdev)
2390{
2391 struct hci_conn *conn;
2392 struct sk_buff *skb;
2393 int quote;
2394
2395 BT_DBG("%s", hdev->name);
2396
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002397 if (!hci_conn_num(hdev, SCO_LINK))
2398 return;
2399
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2401 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2402 BT_DBG("skb %p len %d", skb, skb->len);
2403 hci_send_frame(skb);
2404
2405 conn->sent++;
2406 if (conn->sent == ~0)
2407 conn->sent = 0;
2408 }
2409 }
2410}
2411
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002412static inline void hci_sched_esco(struct hci_dev *hdev)
2413{
2414 struct hci_conn *conn;
2415 struct sk_buff *skb;
2416 int quote;
2417
2418 BT_DBG("%s", hdev->name);
2419
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002420 if (!hci_conn_num(hdev, ESCO_LINK))
2421 return;
2422
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002423 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2424 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2425 BT_DBG("skb %p len %d", skb, skb->len);
2426 hci_send_frame(skb);
2427
2428 conn->sent++;
2429 if (conn->sent == ~0)
2430 conn->sent = 0;
2431 }
2432 }
2433}
2434
/* Drain queued LE data.
 *
 * Mirrors hci_sched_acl() but draws credits from the LE pool when the
 * controller has dedicated LE buffers (hdev->le_pkts), otherwise from
 * the shared ACL pool.  Also applies the LE link-supervision TX
 * timeout and the anti-starvation recalculation.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers or shared ACL ones */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2485
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002486static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002488 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002489 struct sk_buff *skb;
2490
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002491 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2492 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002493
2494 /* Schedule queues and send stuff to HCI driver */
2495
2496 hci_sched_acl(hdev);
2497
2498 hci_sched_sco(hdev);
2499
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002500 hci_sched_esco(hdev);
2501
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002502 hci_sched_le(hdev);
2503
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 /* Send next queued raw (unknown type) packet */
2505 while ((skb = skb_dequeue(&hdev->raw_q)))
2506 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507}
2508
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002509/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002510
2511/* ACL data packet */
2512static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2513{
2514 struct hci_acl_hdr *hdr = (void *) skb->data;
2515 struct hci_conn *conn;
2516 __u16 handle, flags;
2517
2518 skb_pull(skb, HCI_ACL_HDR_SIZE);
2519
2520 handle = __le16_to_cpu(hdr->handle);
2521 flags = hci_flags(handle);
2522 handle = hci_handle(handle);
2523
2524 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2525
2526 hdev->stat.acl_rx++;
2527
2528 hci_dev_lock(hdev);
2529 conn = hci_conn_hash_lookup_handle(hdev, handle);
2530 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002531
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002533 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002534
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002536 l2cap_recv_acldata(conn, skb, flags);
2537 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002539 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002540 hdev->name, handle);
2541 }
2542
2543 kfree_skb(skb);
2544}
2545
2546/* SCO data packet */
2547static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2548{
2549 struct hci_sco_hdr *hdr = (void *) skb->data;
2550 struct hci_conn *conn;
2551 __u16 handle;
2552
2553 skb_pull(skb, HCI_SCO_HDR_SIZE);
2554
2555 handle = __le16_to_cpu(hdr->handle);
2556
2557 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2558
2559 hdev->stat.sco_rx++;
2560
2561 hci_dev_lock(hdev);
2562 conn = hci_conn_hash_lookup_handle(hdev, handle);
2563 hci_dev_unlock(hdev);
2564
2565 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002567 sco_recv_scodata(conn, skb);
2568 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002570 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571 hdev->name, handle);
2572 }
2573
2574 kfree_skb(skb);
2575}
2576
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002577static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002579 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 struct sk_buff *skb;
2581
2582 BT_DBG("%s", hdev->name);
2583
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 while ((skb = skb_dequeue(&hdev->rx_q))) {
2585 if (atomic_read(&hdev->promisc)) {
2586 /* Send copy to the sockets */
Johan Hedbergeec8d2b2010-12-16 10:17:38 +02002587 hci_send_to_sock(hdev, skb, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 }
2589
2590 if (test_bit(HCI_RAW, &hdev->flags)) {
2591 kfree_skb(skb);
2592 continue;
2593 }
2594
2595 if (test_bit(HCI_INIT, &hdev->flags)) {
2596 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002597 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 case HCI_ACLDATA_PKT:
2599 case HCI_SCODATA_PKT:
2600 kfree_skb(skb);
2601 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002602 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603 }
2604
2605 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002606 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002608 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 hci_event_packet(hdev, skb);
2610 break;
2611
2612 case HCI_ACLDATA_PKT:
2613 BT_DBG("%s ACL data packet", hdev->name);
2614 hci_acldata_packet(hdev, skb);
2615 break;
2616
2617 case HCI_SCODATA_PKT:
2618 BT_DBG("%s SCO data packet", hdev->name);
2619 hci_scodata_packet(hdev, skb);
2620 break;
2621
2622 default:
2623 kfree_skb(skb);
2624 break;
2625 }
2626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627}
2628
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002629static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002631 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 struct sk_buff *skb;
2633
2634 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2635
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002637 if (atomic_read(&hdev->cmd_cnt)) {
2638 skb = skb_dequeue(&hdev->cmd_q);
2639 if (!skb)
2640 return;
2641
Wei Yongjun7585b972009-02-25 18:29:52 +08002642 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002644 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2645 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 atomic_dec(&hdev->cmd_cnt);
2647 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002648 if (test_bit(HCI_RESET, &hdev->flags))
2649 del_timer(&hdev->cmd_timer);
2650 else
2651 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002652 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 } else {
2654 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002655 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 }
2657 }
2658}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002659
2660int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2661{
2662 /* General inquiry access code (GIAC) */
2663 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2664 struct hci_cp_inquiry cp;
2665
2666 BT_DBG("%s", hdev->name);
2667
2668 if (test_bit(HCI_INQUIRY, &hdev->flags))
2669 return -EINPROGRESS;
2670
Johan Hedberg46632622012-01-02 16:06:08 +02002671 inquiry_cache_flush(hdev);
2672
Andre Guedes2519a1f2011-11-07 11:45:24 -03002673 memset(&cp, 0, sizeof(cp));
2674 memcpy(&cp.lap, lap, sizeof(cp.lap));
2675 cp.length = length;
2676
2677 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2678}
Andre Guedes023d5042011-11-04 14:16:52 -03002679
2680int hci_cancel_inquiry(struct hci_dev *hdev)
2681{
2682 BT_DBG("%s", hdev->name);
2683
2684 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2685 return -EPERM;
2686
2687 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2688}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002689
/* enable_hs is declared earlier in this file; exposed as a writable
 * (0644) module parameter toggling High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");