blob: fc09a3cbe20c5ec3777321fbf6ee7dca36d48cbb [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
/* Delay before auto power-off, presumably in milliseconds — TODO confirm
 * against the auto-off timer setup (not visible in this chunk). */
#define AUTO_OFF_TIMEOUT 2000

/* Non-zero enables High Speed (AMP) support; consulted in hci_dev_open()
 * to decide whether non-BR/EDR controllers are treated as raw devices. */
int enable_hs;

/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Subscribe @nb to HCI device events delivered via hci_notify().
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the HCI event notifier chain.
 * Returns 0 on success or a negative errno from the notifier core. */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) for @hdev to all
 * registered notifier callbacks. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
122/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Invokes @req (which is expected to queue one or more HCI commands) and
 * sleeps interruptibly until hci_req_complete() or hci_req_cancel() flips
 * hdev->req_status, or @timeout jiffies elapse.  Callers serialize via
 * hci_req_lock() (see hci_request()).
 *
 * Returns 0 on success, a negative errno translated from the controller
 * status on HCI error, the negated cancel reason, -EINTR on signal, or
 * -ETIMEDOUT. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and mark ourselves sleeping before
	 * issuing the request, so a fast completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a pending signal we bail out without resetting
	 * req_status; it is re-initialized on the next request. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller status code to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result carries the positive errno from the canceller. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
165static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100166 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167{
168 int ret;
169
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179}
180
/* Request callback: flag a reset in progress and queue an HCI_Reset
 * command; @opt is unused apart from debug output. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the BR/EDR controller initialization command sequence.
 * Runs under HCI_INIT from hci_init_req(); the commands are issued
 * asynchronously and completed via hci_req_complete(). */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers report flow control per packet. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the minimal initialization sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers report flow control in blocks, not packets. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
253static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254{
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285}
286
/* Request callback for the LE init stage; @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
Johan Hedberg561aafb2012-01-04 13:31:59 +0200360 struct inquiry_cache *cache = &hdev->inq_cache;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200361 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362
Johan Hedberg561aafb2012-01-04 13:31:59 +0200363 list_for_each_entry_safe(p, n, &cache->all, all) {
364 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200365 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200367
368 INIT_LIST_HEAD(&cache->unknown);
369 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700370}
371
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
Johan Hedberg561aafb2012-01-04 13:31:59 +0200379 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200381 return e;
382 }
383
384 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385}
386
Johan Hedberg561aafb2012-01-04 13:31:59 +0200387struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
388 bdaddr_t *bdaddr)
389{
390 struct inquiry_cache *cache = &hdev->inq_cache;
391 struct inquiry_entry *e;
392
393 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
394
395 list_for_each_entry(e, &cache->unknown, list) {
396 if (!bacmp(&e->data.bdaddr, bdaddr))
397 return e;
398 }
399
400 return NULL;
401}
402
/* Insert or refresh the inquiry-cache entry for @data->bdaddr.
 *
 * New entries go on the "all" list and, when the remote name is not yet
 * known, additionally on the "unknown" list.  When @name_known resolves
 * a previously unknown/unresolved name, the entry is taken off its
 * secondary list.  Uses GFP_ATOMIC: callers may be in event-processing
 * context; allocation failure silently drops the result. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
							bool name_known)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie)
		goto update;

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just resolved: drop the entry from its secondary list.
	 * (A freshly added known-name entry never matches: its state is
	 * already NAME_KNOWN.) */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
440
441static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
442{
443 struct inquiry_cache *cache = &hdev->inq_cache;
444 struct inquiry_info *info = (struct inquiry_info *) buf;
445 struct inquiry_entry *e;
446 int copied = 0;
447
Johan Hedberg561aafb2012-01-04 13:31:59 +0200448 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700449 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200450
451 if (copied >= num)
452 break;
453
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454 bacpy(&info->bdaddr, &data->bdaddr);
455 info->pscan_rep_mode = data->pscan_rep_mode;
456 info->pscan_period_mode = data->pscan_period_mode;
457 info->pscan_mode = data->pscan_mode;
458 memcpy(info->dev_class, data->dev_class, 3);
459 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200460
Linus Torvalds1da177e2005-04-16 15:20:36 -0700461 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200462 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700463 }
464
465 BT_DBG("cache %p, copied %d", cache, copied);
466 return copied;
467}
468
469static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
470{
471 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
472 struct hci_cp_inquiry cp;
473
474 BT_DBG("%s", hdev->name);
475
476 if (test_bit(HCI_INQUIRY, &hdev->flags))
477 return;
478
479 /* Start Inquiry */
480 memcpy(&cp.lap, &ir->lap, 3);
481 cp.length = ir->length;
482 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200483 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484}
485
/* ioctl backend for HCIINQUIRY: run (or reuse) an inquiry and copy the
 * cached results back to user space.
 *
 * A fresh inquiry is issued when the cache is stale, empty, or the
 * caller set IREQ_CACHE_FLUSH.  Results are dumped into a kernel buffer
 * under the device lock (inquiry_cache_dump() must not sleep) and then
 * copied out.  Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units; 2000 ms per unit bounds the wait. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request header (with the real num_rsp), then the
	 * records themselves. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
551
552/* ---- HCI ioctl helpers ---- */
553
554int hci_dev_open(__u16 dev)
555{
556 struct hci_dev *hdev;
557 int ret = 0;
558
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200559 hdev = hci_dev_get(dev);
560 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 return -ENODEV;
562
563 BT_DBG("%s %p", hdev->name, hdev);
564
565 hci_req_lock(hdev);
566
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200567 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
568 ret = -ERFKILL;
569 goto done;
570 }
571
Linus Torvalds1da177e2005-04-16 15:20:36 -0700572 if (test_bit(HCI_UP, &hdev->flags)) {
573 ret = -EALREADY;
574 goto done;
575 }
576
577 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
578 set_bit(HCI_RAW, &hdev->flags);
579
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200580 /* Treat all non BR/EDR controllers as raw devices if
581 enable_hs is not set */
582 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100583 set_bit(HCI_RAW, &hdev->flags);
584
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 if (hdev->open(hdev)) {
586 ret = -EIO;
587 goto done;
588 }
589
590 if (!test_bit(HCI_RAW, &hdev->flags)) {
591 atomic_set(&hdev->cmd_cnt, 1);
592 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200593 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700594
Marcel Holtmann04837f62006-07-03 10:02:33 +0200595 ret = __hci_request(hdev, hci_init_req, 0,
596 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597
Andre Guedeseead27d2011-06-30 19:20:55 -0300598 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300599 ret = __hci_request(hdev, hci_le_init_req, 0,
600 msecs_to_jiffies(HCI_INIT_TIMEOUT));
601
Linus Torvalds1da177e2005-04-16 15:20:36 -0700602 clear_bit(HCI_INIT, &hdev->flags);
603 }
604
605 if (!ret) {
606 hci_dev_hold(hdev);
607 set_bit(HCI_UP, &hdev->flags);
608 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200609 if (!test_bit(HCI_SETUP, &hdev->flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300610 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200611 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300612 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200613 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900614 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200616 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200617 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400618 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619
620 skb_queue_purge(&hdev->cmd_q);
621 skb_queue_purge(&hdev->rx_q);
622
623 if (hdev->flush)
624 hdev->flush(hdev);
625
626 if (hdev->sent_cmd) {
627 kfree_skb(hdev->sent_cmd);
628 hdev->sent_cmd = NULL;
629 }
630
631 hdev->close(hdev);
632 hdev->flags = 0;
633 }
634
635done:
636 hci_req_unlock(hdev);
637 hci_dev_put(hdev);
638 return ret;
639}
640
/* Bring the device down: stop timers and work, flush caches and
 * connections, reset the controller, drain all queues and close the
 * transport.  The statement order is significant — work is flushed
 * before queues are purged, and the reset runs before the command
 * worker is flushed.  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_open(). */
	hci_dev_put(hdev);
	return 0;
}
720
721int hci_dev_close(__u16 dev)
722{
723 struct hci_dev *hdev;
724 int err;
725
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200726 hdev = hci_dev_get(dev);
727 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728 return -ENODEV;
729 err = hci_dev_do_close(hdev);
730 hci_dev_put(hdev);
731 return err;
732}
733
/* ioctl backend for HCIDEVRESET: drop queued traffic, flush caches and
 * connections, and (for non-raw devices) issue an HCI_Reset.  A device
 * that is not up is silently left alone (returns 0).  Returns a
 * negative errno on reset failure. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing and zero all flow-control credits. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
772
773int hci_dev_reset_stat(__u16 dev)
774{
775 struct hci_dev *hdev;
776 int ret = 0;
777
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200778 hdev = hci_dev_get(dev);
779 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700780 return -ENODEV;
781
782 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
783
784 hci_dev_put(hdev);
785
786 return ret;
787}
788
/* Dispatcher for the per-device HCISET* ioctls.
 *
 * Copies a struct hci_dev_req from user space and either issues the
 * matching synchronous HCI request (auth/encrypt/scan/link policy) or
 * updates local device fields directly (link mode, packet type, MTUs).
 * Returns 0 on success or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs the MTU in the high __u16
	 * and the packet count in the low __u16. */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
863
/* HCIGETDEVLIST ioctl backend: fill a user-supplied array with the id and
 * flags of every registered controller (up to the count requested by the
 * caller). Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kmalloc below stays small and bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Any userspace interest in the device cancels the
		 * pending auto-power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
910
/* HCIGETDEVINFO ioctl backend: copy a snapshot of one controller's
 * identity, flags, MTUs and statistics to user space. Returns 0 or a
 * negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: abort any pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble is the transport bus, high nibble the device type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
952
953/* ---- Interface to HCI drivers ---- */
954
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200955static int hci_rfkill_set_block(void *data, bool blocked)
956{
957 struct hci_dev *hdev = data;
958
959 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
960
961 if (!blocked)
962 return 0;
963
964 hci_dev_do_close(hdev);
965
966 return 0;
967}
968
969static const struct rfkill_ops hci_rfkill_ops = {
970 .set_block = hci_rfkill_set_block,
971};
972
Linus Torvalds1da177e2005-04-16 15:20:36 -0700973/* Alloc HCI device */
974struct hci_dev *hci_alloc_dev(void)
975{
976 struct hci_dev *hdev;
977
Marcel Holtmann25ea6db2006-07-06 15:40:09 +0200978 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 if (!hdev)
980 return NULL;
981
David Herrmann0ac7e702011-10-08 14:58:47 +0200982 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 skb_queue_head_init(&hdev->driver_init);
984
985 return hdev;
986}
987EXPORT_SYMBOL(hci_alloc_dev);
988
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any frames the driver queued before registration */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
998
/* Deferred power-up handler: open the device and, for devices brought up
 * automatically, schedule an auto power-off unless userspace claims them
 * within AUTO_OFF_TIMEOUT. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup: announce the new
	 * controller index to the management interface */
	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
1015
/* Deferred auto power-off handler: close the device once the auto-off
 * timeout expires without userspace taking ownership. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The timeout fired, so the device is no longer auto-managed */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1027
/* Deferred discoverable-timeout handler: drop inquiry scan (keep page
 * scan only) so the controller stops being discoverable. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Record that no discoverable period is active any more */
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1045
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001046int hci_uuids_clear(struct hci_dev *hdev)
1047{
1048 struct list_head *p, *n;
1049
1050 list_for_each_safe(p, n, &hdev->uuids) {
1051 struct bt_uuid *uuid;
1052
1053 uuid = list_entry(p, struct bt_uuid, list);
1054
1055 list_del(p);
1056 kfree(uuid);
1057 }
1058
1059 return 0;
1060}
1061
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001062int hci_link_keys_clear(struct hci_dev *hdev)
1063{
1064 struct list_head *p, *n;
1065
1066 list_for_each_safe(p, n, &hdev->link_keys) {
1067 struct link_key *key;
1068
1069 key = list_entry(p, struct link_key, list);
1070
1071 list_del(p);
1072 kfree(key);
1073 }
1074
1075 return 0;
1076}
1077
1078struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1079{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001080 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001081
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001082 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001083 if (bacmp(bdaddr, &k->bdaddr) == 0)
1084 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001085
1086 return NULL;
1087}
1088
/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 to keep the key across reboots, 0 to treat it as temporary.
 * The checks are ordered from most to least specific; reordering them
 * would change the result for overlapping cases. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1124
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001125struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1126{
1127 struct link_key *k;
1128
1129 list_for_each_entry(k, &hdev->link_keys, list) {
1130 struct key_master_id *id;
1131
1132 if (k->type != HCI_LK_SMP_LTK)
1133 continue;
1134
1135 if (k->dlen != sizeof(*id))
1136 continue;
1137
1138 id = (void *) &k->data;
1139 if (id->ediv == ediv &&
1140 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1141 return k;
1142 }
1143
1144 return NULL;
1145}
1146EXPORT_SYMBOL(hci_find_ltk);
1147
1148struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1149 bdaddr_t *bdaddr, u8 type)
1150{
1151 struct link_key *k;
1152
1153 list_for_each_entry(k, &hdev->link_keys, list)
1154 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1155 return k;
1156
1157 return NULL;
1158}
1159EXPORT_SYMBOL(hci_find_link_key_type);
1160
/* Store (or update) the link key for @bdaddr. @new_key distinguishes a
 * freshly negotiated key from a key merely being reloaded; only new keys
 * are reported to mgmt and subject to the persistence decision. Returns
 * 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	/* Link keys are always 16 bytes */
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys were only needed for the mgmt event; drop
	 * them from the store again */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1215
/* Store (or update) the SMP Long Term Key for @bdaddr, keeping the
 * EDIV/Rand master identification in the entry's trailing data. Returns
 * 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Replace an existing LTK for this address in place */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate entry plus room for the key_master_id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* pin_len is reused to carry the encryption key size for LTKs */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): old_key_type is passed where hci_add_link_key
	 * passes a persistent flag -- looks like the third argument's
	 * meaning differs here; confirm against mgmt_new_link_key(). */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1253
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001254int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1255{
1256 struct link_key *key;
1257
1258 key = hci_find_link_key(hdev, bdaddr);
1259 if (!key)
1260 return -ENOENT;
1261
1262 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1263
1264 list_del(&key->list);
1265 kfree(key);
1266
1267 return 0;
1268}
1269
/* HCI command timer function */
/* Fires when the controller fails to answer a command in time: restore
 * the command credit and kick the command work so the queue does not
 * stall forever. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1279
Szymon Janc2763eda2011-03-22 13:12:22 +01001280struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1281 bdaddr_t *bdaddr)
1282{
1283 struct oob_data *data;
1284
1285 list_for_each_entry(data, &hdev->remote_oob_data, list)
1286 if (bacmp(bdaddr, &data->bdaddr) == 0)
1287 return data;
1288
1289 return NULL;
1290}
1291
1292int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1293{
1294 struct oob_data *data;
1295
1296 data = hci_find_remote_oob_data(hdev, bdaddr);
1297 if (!data)
1298 return -ENOENT;
1299
1300 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1301
1302 list_del(&data->list);
1303 kfree(data);
1304
1305 return 0;
1306}
1307
1308int hci_remote_oob_data_clear(struct hci_dev *hdev)
1309{
1310 struct oob_data *data, *n;
1311
1312 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1313 list_del(&data->list);
1314 kfree(data);
1315 }
1316
1317 return 0;
1318}
1319
1320int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1321 u8 *randomizer)
1322{
1323 struct oob_data *data;
1324
1325 data = hci_find_remote_oob_data(hdev, bdaddr);
1326
1327 if (!data) {
1328 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1329 if (!data)
1330 return -ENOMEM;
1331
1332 bacpy(&data->bdaddr, bdaddr);
1333 list_add(&data->list, &hdev->remote_oob_data);
1334 }
1335
1336 memcpy(data->hash, hash, sizeof(data->hash));
1337 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1338
1339 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1340
1341 return 0;
1342}
1343
Antti Julkub2a66aa2011-06-15 12:01:14 +03001344struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1345 bdaddr_t *bdaddr)
1346{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001347 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001348
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001349 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001350 if (bacmp(bdaddr, &b->bdaddr) == 0)
1351 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001352
1353 return NULL;
1354}
1355
1356int hci_blacklist_clear(struct hci_dev *hdev)
1357{
1358 struct list_head *p, *n;
1359
1360 list_for_each_safe(p, n, &hdev->blacklist) {
1361 struct bdaddr_list *b;
1362
1363 b = list_entry(p, struct bdaddr_list, list);
1364
1365 list_del(p);
1366 kfree(b);
1367 }
1368
1369 return 0;
1370}
1371
/* Add @bdaddr to the device's blacklist and notify mgmt. Returns the
 * mgmt notification result, or -EBADF for the wildcard address,
 * -EEXIST if already blacklisted, -ENOMEM on allocation failure. */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* BDADDR_ANY is a wildcard, not a blacklistable address */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1392
/* Remove @bdaddr from the blacklist and notify mgmt; BDADDR_ANY clears
 * the whole list instead. Returns the mgmt notification result, 0 for a
 * full clear, or -ENOENT when the address was not blacklisted. */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address means "unblock everything" */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1409
/* Deferred work handler that empties the LE advertising cache under the
 * device lock (scheduled via hdev->adv_work). */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1421
Andre Guedes76c86862011-05-26 16:23:50 -03001422int hci_adv_entries_clear(struct hci_dev *hdev)
1423{
1424 struct adv_entry *entry, *tmp;
1425
1426 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1427 list_del(&entry->list);
1428 kfree(entry);
1429 }
1430
1431 BT_DBG("%s adv cache cleared", hdev->name);
1432
1433 return 0;
1434}
1435
1436struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1437{
1438 struct adv_entry *entry;
1439
1440 list_for_each_entry(entry, &hdev->adv_entries, list)
1441 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1442 return entry;
1443
1444 return NULL;
1445}
1446
1447static inline int is_connectable_adv(u8 evt_type)
1448{
1449 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1450 return 1;
1451
1452 return 0;
1453}
1454
/* Cache a connectable LE advertising report. Returns 0 on success (or
 * when the address is already cached), -EINVAL for non-connectable
 * advertising types, -ENOMEM on allocation failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	/* GFP_ATOMIC: may run from the event-processing path */
	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
			batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1482
/* Register HCI device */
/* Assign the lowest free hciN index, initialise all per-device state
 * (queues, work items, timers, key/blacklist/adv lists), create the
 * workqueue, sysfs entries and rfkill switch, then schedule the initial
 * power-on. Returns the new device id or a negative errno. The
 * initialisation order below is deliberate; do not reorder. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the basic transport callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' to keep the list sorted by id */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/command/TX processing runs from these work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for unanswered HCI commands */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for all deferred work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Bring the device up automatically; it powers back off unless
	 * userspace claims it (see hci_power_on/hci_power_off) */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done under the lock above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1610
/* Unregister HCI device */
/* Tear down a registered controller: unlink it from the global list,
 * close it, flush pending state, notify mgmt, remove rfkill/sysfs and
 * release all cached data before dropping the final reference. The
 * teardown order is deliberate; do not reorder. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a no-op, so unconditionally free all slots */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for fully set-up, non-initialising devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Wait for the adv cache flush so it cannot run after freeing */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Drop all cached per-device data under the lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1662
/* Suspend HCI device */
/* Notify registered listeners that the controller is suspending. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1670
/* Resume HCI device */
/* Notify registered listeners that the controller has resumed. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1678
/* Receive frame from HCI drivers */
/* Entry point for transport drivers handing a received frame to the
 * core: validate device state, tag and timestamp the skb, then queue it
 * for the RX work item. Consumes @skb in all cases. Returns 0 or
 * -ENXIO when the device is neither up nor initialising. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Processing continues in hci_rx_work on the device workqueue */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1701
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301702static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001703 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301704{
1705 int len = 0;
1706 int hlen = 0;
1707 int remain = count;
1708 struct sk_buff *skb;
1709 struct bt_skb_cb *scb;
1710
1711 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1712 index >= NUM_REASSEMBLY)
1713 return -EILSEQ;
1714
1715 skb = hdev->reassembly[index];
1716
1717 if (!skb) {
1718 switch (type) {
1719 case HCI_ACLDATA_PKT:
1720 len = HCI_MAX_FRAME_SIZE;
1721 hlen = HCI_ACL_HDR_SIZE;
1722 break;
1723 case HCI_EVENT_PKT:
1724 len = HCI_MAX_EVENT_SIZE;
1725 hlen = HCI_EVENT_HDR_SIZE;
1726 break;
1727 case HCI_SCODATA_PKT:
1728 len = HCI_MAX_SCO_SIZE;
1729 hlen = HCI_SCO_HDR_SIZE;
1730 break;
1731 }
1732
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001733 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301734 if (!skb)
1735 return -ENOMEM;
1736
1737 scb = (void *) skb->cb;
1738 scb->expect = hlen;
1739 scb->pkt_type = type;
1740
1741 skb->dev = (void *) hdev;
1742 hdev->reassembly[index] = skb;
1743 }
1744
1745 while (count) {
1746 scb = (void *) skb->cb;
1747 len = min(scb->expect, (__u16)count);
1748
1749 memcpy(skb_put(skb, len), data, len);
1750
1751 count -= len;
1752 data += len;
1753 scb->expect -= len;
1754 remain = count;
1755
1756 switch (type) {
1757 case HCI_EVENT_PKT:
1758 if (skb->len == HCI_EVENT_HDR_SIZE) {
1759 struct hci_event_hdr *h = hci_event_hdr(skb);
1760 scb->expect = h->plen;
1761
1762 if (skb_tailroom(skb) < scb->expect) {
1763 kfree_skb(skb);
1764 hdev->reassembly[index] = NULL;
1765 return -ENOMEM;
1766 }
1767 }
1768 break;
1769
1770 case HCI_ACLDATA_PKT:
1771 if (skb->len == HCI_ACL_HDR_SIZE) {
1772 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1773 scb->expect = __le16_to_cpu(h->dlen);
1774
1775 if (skb_tailroom(skb) < scb->expect) {
1776 kfree_skb(skb);
1777 hdev->reassembly[index] = NULL;
1778 return -ENOMEM;
1779 }
1780 }
1781 break;
1782
1783 case HCI_SCODATA_PKT:
1784 if (skb->len == HCI_SCO_HDR_SIZE) {
1785 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1786 scb->expect = h->dlen;
1787
1788 if (skb_tailroom(skb) < scb->expect) {
1789 kfree_skb(skb);
1790 hdev->reassembly[index] = NULL;
1791 return -ENOMEM;
1792 }
1793 }
1794 break;
1795 }
1796
1797 if (scb->expect == 0) {
1798 /* Complete frame */
1799
1800 bt_cb(skb)->pkt_type = type;
1801 hci_recv_frame(skb);
1802
1803 hdev->reassembly[index] = NULL;
1804 return remain;
1805 }
1806 }
1807
1808 return remain;
1809}
1810
/* Feed a fragment stream of a single, known packet type into the
 * reassembly machinery. Each packet type uses its own reassembly slot
 * (type - 1). Returns bytes left unconsumed (>= 0, normally 0) or a
 * negative errno from hci_reassembly(). */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* A complete packet was consumed; advance past it and
		 * continue with the remaining bytes. */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1830
/* Single shared reassembly slot for type-prefixed byte streams */
#define STREAM_REASSEMBLY 0

/* Reassemble a raw byte stream (e.g. from a UART transport) in which
 * every HCI packet is preceded by a one-byte packet-type indicator.
 * Returns bytes left unconsumed or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the packet
			 * type, consumed here and not part of the packet. */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Mid-packet: reuse the type recorded when the
			 * reassembly buffer was created. */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1865
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866/* ---- Interface to upper protocols ---- */
1867
/* Register an upper-protocol callback structure. The global list is
 * protected by hci_cb_list_lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1879
/* Remove a previously registered upper-protocol callback structure
 * from the global list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1891
/* Hand one fully formed packet to the transport driver. The skb must
 * carry its hci_dev in skb->dev and its type in bt_cb(skb)->pkt_type.
 * Returns the driver's send() result, or -ENODEV. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		/* Mirror outgoing traffic to monitoring sockets */
		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1915
/* Send HCI command */
/* Build an HCI command packet (header + optional parameter block) and
 * queue it on cmd_q for the command work item. @param may be NULL when
 * @plen is 0. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can be resumed after its completion event. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951
/* Get data from the previously sent command */
/* Return a pointer to the parameter block of the last sent command if
 * its opcode matches @opcode, otherwise NULL. The pointer refers into
 * hdev->sent_cmd and is only valid while that skb is kept. */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Compare in wire (little-endian) byte order */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
1969
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb. The
 * handle and the packet-boundary/broadcast flags are packed into one
 * little-endian 16-bit field. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1982
/* Queue an ACL skb (possibly carrying fragments in its frag_list) on
 * @queue. The first fragment keeps the caller-supplied flags; every
 * continuation fragment is re-marked ACL_CONT and gets its own ACL
 * header. All fragments are enqueued under the queue lock so the TX
 * scheduler never sees a partially queued packet. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; fragments become ordinary
		 * queue entries below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2023
/* Queue ACL data on a channel's data queue and kick the TX work item.
 * The first (or only) fragment gets its ACL header here; continuation
 * fragments are handled inside hci_queue_acl(). */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2040
/* Send SCO data */
/* Prepend a SCO header (handle, length) to @skb, queue it on the
 * connection's data queue and kick the TX work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header on the stack, then copy it into the
	 * headroom made by skb_push(). */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2063
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick, among connections of @type with queued data, the one with the
 * fewest in-flight packets, and compute its fair share (*quote) of the
 * available controller buffers. Returns NULL (and *quote = 0) when no
 * eligible connection exists. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer pool depends on link type; LE falls back to
		 * the ACL pool when no dedicated LE buffers exist. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2125
/* TX timeout handling: the controller stopped returning buffer
 * completions for links of @type, so disconnect every connection of
 * that type that still has unacknowledged packets. 0x13 is the
 * "remote user terminated connection" reason code. */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2146
/* Priority-aware channel scheduler: among all channels of connections
 * of @type with queued data, consider only those whose head skb has
 * the highest priority seen so far, and among these pick the channel
 * whose connection has the fewest in-flight packets. *quote receives
 * that connection's fair share of controller buffers. Returns NULL if
 * nothing is eligible. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-busy connection wins at equal priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer pool depends on link type; LE may share ACL buffers */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2225
/* Anti-starvation pass after a TX round: for every channel of @type
 * that sent nothing this round (chan->sent == 0) but still has queued
 * data, promote its head skb to just below the maximum priority so it
 * competes better next round. Channels that did send merely have their
 * per-round counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2275
/* Drain ACL channels: repeatedly pick the best channel via
 * hci_chan_sent() and send up to its quota of packets, stopping a
 * channel early if a lower-priority skb is reached. Runs a priority
 * recalculation afterwards if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the buffer count to detect whether we sent anything */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2325
/* Schedule SCO */
/* Drain SCO connections round-robin via hci_low_sent(), bounded by the
 * controller's SCO buffer count. conn->sent wraps back to 0 at ~0. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2349
/* Schedule eSCO links; same round-robin scheme as hci_sched_sco() and
 * sharing the same sco_cnt buffer budget. */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2372
/* Drain LE channels, analogous to hci_sched_acl(). Controllers with
 * no dedicated LE buffers (le_pkts == 0) borrow the ACL buffer pool;
 * the final count is written back to the pool actually used. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember start value to detect activity */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2423
/* TX work item: run every per-link-type scheduler, then flush any raw
 * (unknown type) packets queued by user space straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2446
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002447/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002448
2449/* ACL data packet */
2450static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2451{
2452 struct hci_acl_hdr *hdr = (void *) skb->data;
2453 struct hci_conn *conn;
2454 __u16 handle, flags;
2455
2456 skb_pull(skb, HCI_ACL_HDR_SIZE);
2457
2458 handle = __le16_to_cpu(hdr->handle);
2459 flags = hci_flags(handle);
2460 handle = hci_handle(handle);
2461
2462 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2463
2464 hdev->stat.acl_rx++;
2465
2466 hci_dev_lock(hdev);
2467 conn = hci_conn_hash_lookup_handle(hdev, handle);
2468 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002469
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002471 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002472
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002474 l2cap_recv_acldata(conn, skb, flags);
2475 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002476 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002477 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478 hdev->name, handle);
2479 }
2480
2481 kfree_skb(skb);
2482}
2483
2484/* SCO data packet */
2485static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2486{
2487 struct hci_sco_hdr *hdr = (void *) skb->data;
2488 struct hci_conn *conn;
2489 __u16 handle;
2490
2491 skb_pull(skb, HCI_SCO_HDR_SIZE);
2492
2493 handle = __le16_to_cpu(hdr->handle);
2494
2495 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2496
2497 hdev->stat.sco_rx++;
2498
2499 hci_dev_lock(hdev);
2500 conn = hci_conn_hash_lookup_handle(hdev, handle);
2501 hci_dev_unlock(hdev);
2502
2503 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002505 sco_recv_scodata(conn, skb);
2506 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002508 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509 hdev->name, handle);
2510 }
2511
2512 kfree_skb(skb);
2513}
2514
/* RX work item: drain rx_q, mirroring frames to monitoring sockets in
 * promiscuous mode, dropping everything in raw mode, dropping data
 * packets during init, and otherwise dispatching by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: user space handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * only events are expected during init. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2566
/* TX work handler for the HCI command queue. Sends at most one queued
 * command per invocation, gated by the controller's outstanding-command
 * credit (hdev->cmd_cnt). Re-queued by the event handler when the
 * controller returns credits.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous in-flight command; a clone of the new
		 * one is kept in sent_cmd for matching its completion
		 * event (kfree_skb(NULL) is a no-op). */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			/* Consume one credit, then hand the frame to the
			 * driver. hci_send_frame frees the skb. */
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				/* HCI_Reset may legitimately take longer;
				 * don't arm the command watchdog for it. */
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (OOM): put the command back at the
			 * head of the queue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002597
2598int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2599{
2600 /* General inquiry access code (GIAC) */
2601 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2602 struct hci_cp_inquiry cp;
2603
2604 BT_DBG("%s", hdev->name);
2605
2606 if (test_bit(HCI_INQUIRY, &hdev->flags))
2607 return -EINPROGRESS;
2608
2609 memset(&cp, 0, sizeof(cp));
2610 memcpy(&cp.lap, lap, sizeof(cp.lap));
2611 cp.length = length;
2612
2613 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2614}
Andre Guedes023d50492011-11-04 14:16:52 -03002615
2616int hci_cancel_inquiry(struct hci_dev *hdev)
2617{
2618 BT_DBG("%s", hdev->name);
2619
2620 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2621 return -EPERM;
2622
2623 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2624}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002625
/* Expose enable_hs (declared earlier in this file) as a writable module
 * parameter; presumably gates Bluetooth High Speed (AMP) support —
 * NOTE(review): confirm against the enable_hs declaration/uses. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");