blob: 162176151db9f681ae99d53fb1d5d0a0d3b92767 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
Johan Hedbergab81cbf2010-12-15 13:53:18 +020056#define AUTO_OFF_TIMEOUT 2000
57
Andrei Emeltchenko7784d782011-11-18 13:35:42 +020058int enable_hs;
59
Marcel Holtmannb78752c2010-08-08 23:06:53 -040060static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020061static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020062static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Linus Torvalds1da177e2005-04-16 15:20:36 -070064/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
Linus Torvalds1da177e2005-04-16 15:20:36 -070072/* HCI notifiers list */
Alan Sterne041c682006-03-27 01:16:30 -080073static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Register a callback for HCI device events (up/down/register/unregister).
 * Thin wrapper around the atomic notifier chain; returns its result (0 on
 * success).
 */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove a previously registered HCI event notifier callback.
 * Returns the notifier chain's status code (0 on success).
 */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to all registered
 * notifiers, passing @hdev as the notifier data.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
/* Complete a pending synchronous request.
 *
 * Called from event processing when command @cmd finished with @result.
 * If a task is sleeping in __hci_request() (req_status == HCI_REQ_PEND),
 * store the result, mark the request done and wake the sleeper.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
110
/* Abort a pending synchronous request with error @err (a positive errno;
 * __hci_request() negates it).  Wakes the task sleeping in __hci_request().
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
121
/* Execute request and wait for completion.
 *
 * @req:     callback that queues the HCI command(s) for this request
 * @opt:     opaque argument forwarded to @req
 * @timeout: maximum wait, in jiffies
 *
 * Must be called with the request lock held (see hci_request()).  Returns
 * 0 on success, a negative errno derived from the controller status on
 * failure, -EINTR if interrupted by a signal, or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and go interruptible *before*
	 * issuing the request, so a completion that arrives immediately
	 * cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map it to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized wrapper around __hci_request().
 *
 * Rejects requests while the device is down (-ENETDOWN) and takes the
 * per-device request lock so only one synchronous request runs at a time.
 */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: send HCI_OP_RESET to the controller.
 * Sets HCI_RESET so the event path knows a reset is in flight.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Initialization command sequence for BR/EDR controllers.
 *
 * Queues the mandatory and optional HCI setup commands; completion is
 * tracked per-command by the event handlers during the HCI_INIT phase.
 * The command order below is deliberate — do not reorder.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that misbehave on reset) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys from the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Initialization command sequence for AMP (alternate MAC/PHY) controllers.
 * AMP controllers use block-based flow control and a much shorter setup.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device bring-up: flush any driver-supplied setup
 * commands first, then run the type-specific init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback: LE-specific initialization (only queried when the host
 * is LE-capable; see hci_dev_open()).
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296{
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303}
304
305static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306{
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313}
314
315static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316{
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200321 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323}
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326{
327 __le16 policy = cpu_to_le16(opt);
328
Marcel Holtmanna418b892008-11-30 12:17:28 +0100329 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
/* Free every entry in the inquiry cache and reinitialize the unknown/
 * resolve lookup lists.  Caller must hold the device lock (all callers
 * here invoke it between hci_dev_lock()/hci_dev_unlock()).
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	/* The unknown/resolve lists shared these freed entries; reset them */
	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
371
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
Johan Hedberg561aafb2012-01-04 13:31:59 +0200379 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700380 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200381 return e;
382 }
383
384 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700385}
386
Johan Hedberg561aafb2012-01-04 13:31:59 +0200387struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
388 bdaddr_t *bdaddr)
389{
390 struct inquiry_cache *cache = &hdev->inq_cache;
391 struct inquiry_entry *e;
392
393 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
394
395 list_for_each_entry(e, &cache->unknown, list) {
396 if (!bacmp(&e->data.bdaddr, bdaddr))
397 return e;
398 }
399
400 return NULL;
401}
402
/* Insert or refresh an inquiry-cache entry for @data->bdaddr.
 *
 * @name_known: true if the remote name is already resolved.
 *
 * Returns true when the entry's name is known after the update, false
 * when the name is still unknown or allocation failed.  Entries live on
 * the @all list; name-unknown entries are additionally linked on the
 * @unknown list via ->list.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
							bool name_known)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie)
		goto update;

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing entry to name-known and take it off the
	 * unknown list, unless a name resolution is already pending. */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
445
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.  Must not
 * sleep (called under the device lock); @buf must hold @num entries.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
473
/* Request callback: start an inquiry with the parameters from the
 * hci_inquiry_req passed via @opt.  Does nothing if an inquiry is
 * already in progress.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
490
/* HCIINQUIRY ioctl handler.
 *
 * Copies a struct hci_inquiry_req from user space, optionally runs a new
 * inquiry (when the cache is stale, empty, or a flush was requested), then
 * copies the updated request header plus the cached inquiry results back
 * to @arg.  Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the device lock whether a fresh inquiry is needed */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; 2000ms per unit bounds the wait */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
556
557/* ---- HCI ioctl helpers ---- */
558
/* Bring an HCI device up (HCIDEVUP ioctl path).
 *
 * Opens the driver, runs the HCI init sequence (unless the device is in
 * raw mode), and on success marks the device HCI_UP, notifies listeners
 * and informs the management interface.  On init failure all queues and
 * works are flushed and the driver is closed again.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-sequence error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill-blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* Additional LE init when the host supports LE */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Hold a reference for as long as the device stays up;
		 * dropped in hci_dev_do_close(). */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
645
/* Power an HCI device down.
 *
 * Cancels any pending request, flushes work items and timers, flushes the
 * connection and inquiry state, resets the controller (unless raw), closes
 * the driver and drops the reference taken in hci_dev_open().  The teardown
 * order below is deliberate — do not reorder.  Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just make sure the command timer is dead */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Release the reference held since hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
725
726int hci_dev_close(__u16 dev)
727{
728 struct hci_dev *hdev;
729 int err;
730
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200731 hdev = hci_dev_get(dev);
732 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700733 return -ENODEV;
734 err = hci_dev_do_close(hdev);
735 hci_dev_put(hdev);
736 return err;
737}
738
/* HCIDEVRESET ioctl handler: soft-reset a running device.
 *
 * Drops queues and connection/inquiry state, resets the flow-control
 * counters, and (unless raw) issues an HCI reset.  A device that is not
 * up is left untouched (returns 0).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset command/packet accounting */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
777
778int hci_dev_reset_stat(__u16 dev)
779{
780 struct hci_dev *hdev;
781 int ret = 0;
782
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200783 hdev = hci_dev_get(dev);
784 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 return -ENODEV;
786
787 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
788
789 hci_dev_put(hdev);
790
791 return ret;
792}
793
/* Dispatcher for the HCISET* device-configuration ioctls.
 *
 * Copies a struct hci_dev_req from user space and either runs the
 * matching synchronous HCI request (auth/encrypt/scan/link policy) or
 * updates the device fields directly (link mode, packet type, MTUs).
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit words: [pkts][mtu] */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
868
869int hci_get_dev_list(void __user *arg)
870{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200871 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700872 struct hci_dev_list_req *dl;
873 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700874 int n = 0, size, err;
875 __u16 dev_num;
876
877 if (get_user(dev_num, (__u16 __user *) arg))
878 return -EFAULT;
879
880 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
881 return -EINVAL;
882
883 size = sizeof(*dl) + dev_num * sizeof(*dr);
884
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200885 dl = kzalloc(size, GFP_KERNEL);
886 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887 return -ENOMEM;
888
889 dr = dl->dev_req;
890
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200891 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200892 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberg32435532011-11-07 22:16:04 +0200893 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200894 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +0200895
896 if (!test_bit(HCI_MGMT, &hdev->flags))
897 set_bit(HCI_PAIRABLE, &hdev->flags);
898
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 (dr + n)->dev_id = hdev->id;
900 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200901
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 if (++n >= dev_num)
903 break;
904 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200905 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906
907 dl->dev_num = n;
908 size = sizeof(*dl) + n * sizeof(*dr);
909
910 err = copy_to_user(arg, dl, size);
911 kfree(dl);
912
913 return err ? -EFAULT : 0;
914}
915
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for the
 * controller named in di.dev_id and copy it back to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: abort any pending
	 * auto-power-off (synchronously, since we hold a reference).
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus, high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
957
958/* ---- Interface to HCI drivers ---- */
959
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200960static int hci_rfkill_set_block(void *data, bool blocked)
961{
962 struct hci_dev *hdev = data;
963
964 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
965
966 if (!blocked)
967 return 0;
968
969 hci_dev_do_close(hdev);
970
971 return 0;
972}
973
/* rfkill operations: only the block transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
977
/* Alloc HCI device. Returns a zeroed struct hci_dev with sysfs state
 * and the driver_init queue prepared, or NULL on allocation failure.
 * Most fields are filled in later by hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
993
/* Free HCI device. Drops the embedded device reference; the struct
 * itself is released by the device release callback once the last
 * reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1003
/* Work handler that brings a newly registered controller up. If the
 * device is in auto-off mode, schedule it to be powered back down
 * after AUTO_OFF_TIMEOUT unless userspace claims it first. The first
 * successful power-on ends the HCI_SETUP phase and announces the
 * controller to the management interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
1020
/* Delayed-work handler that powers a controller down after the
 * auto-off timeout expired without userspace taking it over.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The auto-off window is over either way */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1032
/* Delayed-work handler that ends a timed discoverable period: leave
 * only page scan enabled (dropping inquiry scan) and reset the
 * stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1050
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001051int hci_uuids_clear(struct hci_dev *hdev)
1052{
1053 struct list_head *p, *n;
1054
1055 list_for_each_safe(p, n, &hdev->uuids) {
1056 struct bt_uuid *uuid;
1057
1058 uuid = list_entry(p, struct bt_uuid, list);
1059
1060 list_del(p);
1061 kfree(uuid);
1062 }
1063
1064 return 0;
1065}
1066
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001067int hci_link_keys_clear(struct hci_dev *hdev)
1068{
1069 struct list_head *p, *n;
1070
1071 list_for_each_safe(p, n, &hdev->link_keys) {
1072 struct link_key *key;
1073
1074 key = list_entry(p, struct link_key, list);
1075
1076 list_del(p);
1077 kfree(key);
1078 }
1079
1080 return 0;
1081}
1082
1083struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1084{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001085 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001086
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001087 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001088 if (bacmp(bdaddr, &k->bdaddr) == 0)
1089 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001090
1091 return NULL;
1092}
1093
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001094static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1095 u8 key_type, u8 old_key_type)
1096{
1097 /* Legacy key */
1098 if (key_type < 0x03)
1099 return 1;
1100
1101 /* Debug keys are insecure so don't store them persistently */
1102 if (key_type == HCI_LK_DEBUG_COMBINATION)
1103 return 0;
1104
1105 /* Changed combination key and there's no previous one */
1106 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1107 return 0;
1108
1109 /* Security mode 3 case */
1110 if (!conn)
1111 return 1;
1112
1113 /* Neither local nor remote side had no-bonding as requirement */
1114 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1115 return 1;
1116
1117 /* Local side had dedicated bonding as requirement */
1118 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1119 return 1;
1120
1121 /* Remote side had dedicated bonding as requirement */
1122 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1123 return 1;
1124
1125 /* If none of the above criteria match, then don't store the key
1126 * persistently */
1127 return 0;
1128}
1129
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001130struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1131{
1132 struct link_key *k;
1133
1134 list_for_each_entry(k, &hdev->link_keys, list) {
1135 struct key_master_id *id;
1136
1137 if (k->type != HCI_LK_SMP_LTK)
1138 continue;
1139
1140 if (k->dlen != sizeof(*id))
1141 continue;
1142
1143 id = (void *) &k->data;
1144 if (id->ediv == ediv &&
1145 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1146 return k;
1147 }
1148
1149 return NULL;
1150}
1151EXPORT_SYMBOL(hci_find_ltk);
1152
1153struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1154 bdaddr_t *bdaddr, u8 type)
1155{
1156 struct link_key *k;
1157
1158 list_for_each_entry(k, &hdev->link_keys, list)
1159 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1160 return k;
1161
1162 return NULL;
1163}
1164EXPORT_SYMBOL(hci_find_link_key_type);
1165
/* Store (or update) a link key for bdaddr. new_key is non-zero when
 * the controller just reported this key via a Link Key Notification,
 * in which case the management interface is told and non-persistent
 * keys are dropped again after the notification.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type so
	 * the persistence decision stays based on the original pairing
	 */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only needed for the notification */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1220
/* Store (or update) an SMP long term key for bdaddr. The ediv/rand
 * pair identifying the key is kept in the entry's trailing data as a
 * struct key_master_id; key_size is stashed in pin_len. Notifies the
 * management interface when new_key is set.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
				u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate room for the key plus its master id payload */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1258
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001259int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1260{
1261 struct link_key *key;
1262
1263 key = hci_find_link_key(hdev, bdaddr);
1264 if (!key)
1265 return -ENOENT;
1266
1267 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1268
1269 list_del(&key->list);
1270 kfree(key);
1271
1272 return 0;
1273}
1274
/* HCI command timer function. Fires when the controller failed to
 * answer a command in time: restore the single command credit and
 * kick cmd_work so queued commands can proceed.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1284
Szymon Janc2763eda2011-03-22 13:12:22 +01001285struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1286 bdaddr_t *bdaddr)
1287{
1288 struct oob_data *data;
1289
1290 list_for_each_entry(data, &hdev->remote_oob_data, list)
1291 if (bacmp(bdaddr, &data->bdaddr) == 0)
1292 return data;
1293
1294 return NULL;
1295}
1296
1297int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298{
1299 struct oob_data *data;
1300
1301 data = hci_find_remote_oob_data(hdev, bdaddr);
1302 if (!data)
1303 return -ENOENT;
1304
1305 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1306
1307 list_del(&data->list);
1308 kfree(data);
1309
1310 return 0;
1311}
1312
/* Release all stored out-of-band pairing data for this controller.
 * Always returns 0.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1324
/* Store (or refresh) the out-of-band pairing hash and randomizer for
 * bdaddr. A new entry is allocated only when the address is not
 * already known. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	/* Existing entries simply get their payload overwritten */
	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1348
Antti Julkub2a66aa2011-06-15 12:01:14 +03001349struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1350 bdaddr_t *bdaddr)
1351{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001352 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001353
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001354 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001355 if (bacmp(bdaddr, &b->bdaddr) == 0)
1356 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001357
1358 return NULL;
1359}
1360
1361int hci_blacklist_clear(struct hci_dev *hdev)
1362{
1363 struct list_head *p, *n;
1364
1365 list_for_each_safe(p, n, &hdev->blacklist) {
1366 struct bdaddr_list *b;
1367
1368 b = list_entry(p, struct bdaddr_list, list);
1369
1370 list_del(p);
1371 kfree(b);
1372 }
1373
1374 return 0;
1375}
1376
/* Add bdaddr to the controller's blacklist and notify the management
 * interface. Rejects the wildcard address and duplicates.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	/* The wildcard address cannot be blacklisted */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}
1397
/* Remove bdaddr from the blacklist and notify the management
 * interface. The wildcard address clears the whole list instead.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}
1414
/* Delayed-work handler that empties the LE advertising cache under
 * the device lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1426
/* Release every cached LE advertising entry. Always returns 0. */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1440
1441struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1442{
1443 struct adv_entry *entry;
1444
1445 list_for_each_entry(entry, &hdev->adv_entries, list)
1446 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1447 return entry;
1448
1449 return NULL;
1450}
1451
1452static inline int is_connectable_adv(u8 evt_type)
1453{
1454 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1455 return 1;
1456
1457 return 0;
1458}
1459
/* Cache the address from an LE advertising report if the event type
 * permits connecting and the address is not already cached. Returns
 * 0 on success (including the duplicate case), -EINVAL for
 * non-connectable events, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1487
/* Register HCI device. Assigns the first free id (AMP controllers
 * never get index 0), initializes all per-device state, creates the
 * device workqueue and sysfs/rfkill entries, and kicks off the
 * asynchronous power-on. Returns the assigned id or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply the mandatory callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	/* Default controller state and parameters */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Work items driving the RX, command and TX paths */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best effort; the device works without */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Bring the device up asynchronously; it stays in setup mode
	 * and auto-off until userspace takes over.
	 */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1615
/* Unregister HCI device. Removes the device from the global list,
 * shuts it down, tears down mgmt/sysfs/rfkill state and releases all
 * per-device caches before dropping the final reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Devices still in setup were never announced via mgmt, so
	 * only announce removal for fully registered ones.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Release all per-device caches under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1667
/* Suspend HCI device: notify interested subsystems, nothing more.
 * Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1675
/* Resume HCI device: notify interested subsystems, nothing more.
 * Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1683
/* Receive frame from HCI drivers: timestamp the skb, queue it on the
 * device RX queue and schedule rx_work to process it. Frames arriving
 * while the device is neither up nor initializing are dropped.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1706
/* Feed raw driver bytes into the per-slot reassembly buffer.
 *
 * Accumulates up to @count bytes from @data into hdev->reassembly[@index]
 * until a complete HCI packet of @type has been built, then hands the
 * finished skb to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0 — the caller is
 * expected to call again with the remainder), -EILSEQ for an invalid
 * type or index, or -ENOMEM if allocation fails or the advertised
 * payload would not fit in the preallocated buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets go through reassembly. */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
			index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the worst-case buffer for
		 * this packet type and expect just the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Track progress in the skb's control block: scb->expect is
		 * how many more bytes we need before the next decision. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the fixed-size header is complete, read the payload
		 * length out of it and expect exactly that many more bytes. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				/* Advertised payload larger than the buffer:
				 * abandon this packet entirely. */
				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it up and release the slot.
			 * hci_recv_frame() takes ownership of the skb. */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1815
Marcel Holtmannef222012007-07-11 06:42:04 +02001816int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1817{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301818 int rem = 0;
1819
Marcel Holtmannef222012007-07-11 06:42:04 +02001820 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1821 return -EILSEQ;
1822
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001823 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001824 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301825 if (rem < 0)
1826 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001827
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301828 data += (count - rem);
1829 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001830 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001831
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301832 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001833}
1834EXPORT_SYMBOL(hci_recv_fragment);
1835
Suraj Sumangala99811512010-07-14 13:02:19 +05301836#define STREAM_REASSEMBLY 0
1837
/* Feed an untyped byte stream (e.g. from a UART driver) into the
 * reassembler. When no packet is in progress, the first byte of the
 * stream is the HCI packet-type indicator; otherwise the type of the
 * in-flight packet in the STREAM_REASSEMBLY slot is reused.
 * Returns leftover byte count (>= 0) or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the type byte. */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes the reassembler consumed. */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
1869EXPORT_SYMBOL(hci_recv_stream_fragment);
1870
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871/* ---- Interface to upper protocols ---- */
1872
/* Register an upper-layer protocol callback set on the global list.
 * Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers to the callback list are serialized by this rwlock. */
	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1883EXPORT_SYMBOL(hci_register_cb);
1884
/* Remove a previously registered callback set from the global list.
 * Always succeeds; @cb must have been added with hci_register_cb(). */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1895EXPORT_SYMBOL(hci_unregister_cb);
1896
/* Hand one outgoing skb to the underlying HCI driver.
 * Consumes the skb. Returns -ENODEV if no device is attached to the
 * skb, otherwise the return value of the driver's send callback.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* With promiscuous listeners present, timestamp the frame and
	 * mirror it to the monitoring sockets before it goes out. */
	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1920
/* Send HCI command.
 * Builds a command packet (header + @plen bytes of @param) and queues
 * it on cmd_q; the cmd work transmits it when credits allow.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header (little-endian opcode) followed by parameters. */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Record the opcode while the controller is still initializing;
	 * the init sequence logic elsewhere reads init_last_cmd. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956
1957/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001958void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959{
1960 struct hci_command_hdr *hdr;
1961
1962 if (!hdev->sent_cmd)
1963 return NULL;
1964
1965 hdr = (void *) hdev->sent_cmd->data;
1966
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001967 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 return NULL;
1969
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001970 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971
1972 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1973}
1974
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	/* Prepend the ACL header in front of the current payload. */
	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet-boundary/broadcast flags are
	 * packed into a single little-endian 16-bit field. */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1987
/* Queue an outgoing ACL skb (and any fragments hanging off its
 * frag_list) on @queue. The head fragment already carries its ACL
 * header; continuation fragments get one here with ACL_CONT set.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically so the TX path never sees
		 * a partially queued ACL packet. */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2028
/* Send ACL data on @chan: stamp the skb with the device, packet type
 * and ACL header, queue it on the channel's data queue, then kick the
 * TX work to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	/* Handles fragment chains and continuation flags. */
	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2044EXPORT_SYMBOL(hci_send_acl);
2045
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the SCO header on the stack, then copy it into the
	 * headroom pushed in front of the payload. */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and let the TX work send it. */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2067EXPORT_SYMBOL(hci_send_sco);
2068
2069/* ---- HCI TX task (outgoing data) ---- */
2070
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * in-flight packets (fair scheduling), and compute its send quota in
 * *quote from the relevant controller buffer count. Returns NULL and
 * *quote = 0 when nothing of this type is ready to send.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Divide the available controller credits among the
		 * eligible connections; always grant at least one. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2130
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets, on the assumption the link has stalled.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection
			 * (Bluetooth core spec error code). */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2151
/* Channel-aware scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority; ties are broken by
 * the fewest packets in flight on the owning connection. *quote gets
 * the share of controller credits granted to the winner (at least 1).
 * Returns NULL when no channel of this type has queued data.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters; lower
			 * priority than the best seen so far loses. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority resets the fairness
			 * tracking: only channels at this level compete. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within equal priority, prefer the connection
			 * with the fewest packets in flight. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Split the available credits among the competing channels. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2230
/* Anti-starvation pass, run after a scheduling round that sent data:
 * channels that transmitted get their 'sent' counter cleared, while
 * channels that were skipped have their head skb promoted to
 * HCI_PRIO_MAX - 1 so they win a future round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: reset its counter
			 * instead of promoting it. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2280
/* Schedule ACL traffic: detect stalled links, then repeatedly drain
 * the best channel (per hci_chan_sent) within its quota and the
 * controller's ACL credit count, stopping a channel early if the
 * priority of its head skb drops.
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting credit count so we can tell whether
	 * anything was sent this round. */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something went out: rebalance channel priorities. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2330
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	/* Round-robin over SCO connections via hci_low_sent(), sending
	 * up to each connection's quota while SCO credits remain. */
	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection counter at its limit. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2354
/* Schedule eSCO traffic: identical to hci_sched_sco() but iterates
 * ESCO_LINK connections (shares the SCO credit count). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the per-connection counter at its limit. */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2377
/* Schedule LE traffic. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credit count, so the consumed credits
 * are written back to le_cnt or acl_cnt accordingly at the end.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE credits if the controller
	 * has them, otherwise borrow from the ACL pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something went out: rebalance channel priorities. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2428
/* TX work: run every per-link-type scheduler in turn, then flush any
 * raw (untyped) packets straight to the driver. */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2451
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002452/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453
/* ACL data packet */
/* Unpack the ACL header, look up the owning connection and pass the
 * payload up to L2CAP; frames for unknown handles are dropped.
 * Consumes @skb (ownership moves to l2cap_recv_acldata on success).
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field carries both the handle and the PB/BC flags. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2488
/* SCO data packet */
/* Unpack the SCO header, look up the owning connection and pass the
 * payload up to the SCO layer; frames for unknown handles are dropped.
 * Consumes @skb (ownership moves to sco_recv_scodata on success).
 */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2519
/* RX work: drain the receive queue, mirroring frames to promiscuous
 * sockets, filtering per device state, and dispatching each packet to
 * the matching handler by its type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode, userspace handles everything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state —
			 * only events matter during initialization. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2571
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002572static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002574 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575 struct sk_buff *skb;
2576
2577 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2578
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002580 if (atomic_read(&hdev->cmd_cnt)) {
2581 skb = skb_dequeue(&hdev->cmd_q);
2582 if (!skb)
2583 return;
2584
Wei Yongjun7585b972009-02-25 18:29:52 +08002585 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002587 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2588 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 atomic_dec(&hdev->cmd_cnt);
2590 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002591 if (test_bit(HCI_RESET, &hdev->flags))
2592 del_timer(&hdev->cmd_timer);
2593 else
2594 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002595 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596 } else {
2597 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002598 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002599 }
2600 }
2601}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002602
2603int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2604{
2605 /* General inquiry access code (GIAC) */
2606 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2607 struct hci_cp_inquiry cp;
2608
2609 BT_DBG("%s", hdev->name);
2610
2611 if (test_bit(HCI_INQUIRY, &hdev->flags))
2612 return -EINPROGRESS;
2613
2614 memset(&cp, 0, sizeof(cp));
2615 memcpy(&cp.lap, lap, sizeof(cp.lap));
2616 cp.length = length;
2617
2618 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2619}
Andre Guedes023d50492011-11-04 14:16:52 -03002620
2621int hci_cancel_inquiry(struct hci_dev *hdev)
2622{
2623 BT_DBG("%s", hdev->name);
2624
2625 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2626 return -EPERM;
2627
2628 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2629}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002630
/* Expose enable_hs as a module parameter, readable and writable by root
 * via sysfs (mode 0644).
 */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");