blob: 845da3ee56a0d3966bdeb5ab627ef3dc49c8dead [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
43#include <linux/notifier.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020044#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020045#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030046#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070047#include <net/sock.h>
48
49#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020050#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
/* Delay (ms) before an auto-powered device is switched back off
 * — presumably consumed by the power_off delayed work; confirm at use site. */
#define AUTO_OFF_TIMEOUT 2000

/* Non-zero enables High Speed (AMP) controller support; non-static so it
 * can be read from other files (likely exposed as a module parameter —
 * TODO confirm where module_param() is declared). */
int enable_hs;

/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75/* ---- HCI notifications ---- */
76
/* Add @nb to the chain that hci_notify() broadcasts device events on.
 * Returns the atomic_notifier_chain_register() result (0 on success). */
int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}
81
/* Remove @nb from the HCI event notifier chain.
 * Returns the atomic_notifier_chain_unregister() result (0 on success). */
int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
86
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) for @hdev to all
 * registered notifiers. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92/* ---- HCI requests ---- */
93
Johan Hedberg23bb5762010-12-21 23:01:27 +020094void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070095{
Johan Hedberg23bb5762010-12-21 23:01:27 +020096 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
Johan Hedberga5040ef2011-01-10 13:28:59 +020098 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
Johan Hedberg23bb5762010-12-21 23:01:27 +0200102 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111static void hci_req_cancel(struct hci_dev *hdev, int err)
112{
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120}
121
/* Execute request and wait for completion.
 *
 * Runs @req (which issues one or more HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() wakes us or @timeout (jiffies)
 * expires.  Caller must hold the request lock (see hci_request()).
 * Returns 0, a negative errno, -EINTR on signal or -ETIMEDOUT. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue and change state BEFORE issuing the
	 * request so a completion arriving before schedule_timeout()
	 * still wakes us instead of being lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
164
/* Serialized front-end for __hci_request(): refuses to run unless the
 * device is up, and holds the per-device request lock for the duration.
 * Returns __hci_request()'s result or -ENETDOWN. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
180
/* Request callback: mark a reset in progress and send HCI_Reset.
 * @opt is unused beyond the debug trace. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the init-time HCI command sequence for a BR/EDR controller.
 * Responses are consumed asynchronously; this only sends the commands. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk it away) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all link keys stored on the controller
	 * (BDADDR_ANY wildcard + delete_all flag). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the (much shorter) init-time HCI command sequence for an AMP
 * controller: just a reset and a local-version read. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback run during device bring-up: first flush any
 * driver-provided "special" init packets into the command queue, then
 * dispatch to the type-specific init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open: tag each as an
	 * HCI command packet and hand it to the command work. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
286
/* Request callback for LE-capable controllers: query the LE buffer size.
 * @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
/* Request callback: write the scan-enable setting (inquiry/page scan
 * bits packed into the low byte of @opt). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
304
/* Request callback: write the authentication-enable setting
 * (low byte of @opt). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write the encryption-mode setting
 * (low byte of @opt). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: write the default link policy, converting @opt to
 * little-endian wire format first. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900335/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 * Device is held on return. */
337struct hci_dev *hci_dev_get(int index)
338{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200339 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200347 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700356
357/* ---- Inquiry support ---- */
358static void inquiry_cache_flush(struct hci_dev *hdev)
359{
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_entry *next = cache->list, *e;
362
363 BT_DBG("cache %p", cache);
364
365 cache->list = NULL;
366 while ((e = next)) {
367 next = e->next;
368 kfree(e);
369 }
370}
371
372struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
373{
374 struct inquiry_cache *cache = &hdev->inq_cache;
375 struct inquiry_entry *e;
376
377 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
378
379 for (e = cache->list; e; e = e->next)
380 if (!bacmp(&e->data.bdaddr, bdaddr))
381 break;
382 return e;
383}
384
/* Insert or refresh the inquiry-cache entry for @data->bdaddr, updating
 * both the entry and the cache timestamps.  Allocation is GFP_ATOMIC
 * (called from event-processing context); failure is silently ignored. */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		/* Push onto the head of the singly-linked cache list. */
		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
407
408static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
409{
410 struct inquiry_cache *cache = &hdev->inq_cache;
411 struct inquiry_info *info = (struct inquiry_info *) buf;
412 struct inquiry_entry *e;
413 int copied = 0;
414
415 for (e = cache->list; e && copied < num; e = e->next, copied++) {
416 struct inquiry_data *data = &e->data;
417 bacpy(&info->bdaddr, &data->bdaddr);
418 info->pscan_rep_mode = data->pscan_rep_mode;
419 info->pscan_period_mode = data->pscan_period_mode;
420 info->pscan_mode = data->pscan_mode;
421 memcpy(info->dev_class, data->dev_class, 3);
422 info->clock_offset = data->clock_offset;
423 info++;
424 }
425
426 BT_DBG("cache %p, copied %d", cache, copied);
427 return copied;
428}
429
430static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
431{
432 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
433 struct hci_cp_inquiry cp;
434
435 BT_DBG("%s", hdev->name);
436
437 if (test_bit(HCI_INQUIRY, &hdev->flags))
438 return;
439
440 /* Start Inquiry */
441 memcpy(&cp.lap, &ir->lap, 3);
442 cp.length = ir->length;
443 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200444 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700445}
446
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale, empty, or the caller asked for a flush), then copy the
 * cached results back to user space after the request header.
 * Returns 0 or a negative errno (-EFAULT/-ENODEV/-ENOMEM/request error). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s per spec; ~2s per unit gives slack. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the result array. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
512
513/* ---- HCI ioctl helpers ---- */
514
/* Bring device @dev up: open the driver, run the HCI init sequence
 * (unless the device is raw), and announce HCI_DEV_UP.  On init failure
 * the driver is closed again and queues/works are cleaned up.
 * Returns 0 or a negative errno (-ENODEV/-ERFKILL/-EALREADY/-EIO/...). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): a failed bredr/amp init result in ret is
		 * overwritten here when the host is LE capable — confirm
		 * this masking of the first error is intentional. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During initial setup mgmt is informed elsewhere; only
		 * report power-on once setup has completed. */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
601
/* Tear the device down: cancel pending requests and delayed work, flush
 * work items and queues, reset the controller (unless raw), close the
 * driver and report power-off to mgmt.  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device came up. */
	hci_dev_put(hdev);
	return 0;
}
681
682int hci_dev_close(__u16 dev)
683{
684 struct hci_dev *hdev;
685 int err;
686
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200687 hdev = hci_dev_get(dev);
688 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689 return -ENODEV;
690 err = hci_dev_do_close(hdev);
691 hci_dev_put(hdev);
692 return err;
693}
694
/* Ioctl backend: soft-reset a running device — drop queues, flush the
 * inquiry cache and connections, zero the packet counters and (unless
 * raw) send HCI_Reset.  Succeeds trivially when the device is down. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one command in flight again and clear per-type credit. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
734int hci_dev_reset_stat(__u16 dev)
735{
736 struct hci_dev *hdev;
737 int ret = 0;
738
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200739 hdev = hci_dev_get(dev);
740 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700741 return -ENODEV;
742
743 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
744
745 hci_dev_put(hdev);
746
747 return ret;
748}
749
/* Dispatch the HCISET* device-configuration ioctls.  @arg points to a
 * struct hci_dev_req in user space.  Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two __u16 values:
	 * word 1 = MTU, word 0 = packet count. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
824
/* HCIGETDEVLIST ioctl backend: fill a user-supplied hci_dev_list_req
 * with (id, flags) pairs for up to dev_num registered devices.
 * Also cancels pending auto-power-off and marks non-mgmt devices
 * pairable as a side effect of being listed. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages' worth of entries. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
871
/* Fill a struct hci_dev_info for the device id given by userspace in @arg
 * and copy it back.
 *
 * Returns 0 on success, -EFAULT on a failed userspace copy, -ENODEV if the
 * requested device id is not registered.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: cancel (synchronously)
	 * any pending auto-power-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects the device to be pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble and device type in the high one. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
913
914/* ---- Interface to HCI drivers ---- */
915
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200916static int hci_rfkill_set_block(void *data, bool blocked)
917{
918 struct hci_dev *hdev = data;
919
920 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
921
922 if (!blocked)
923 return 0;
924
925 hci_dev_do_close(hdev);
926
927 return 0;
928}
929
/* rfkill integration: only block/unblock transitions are handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
933
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934/* Alloc HCI device */
/* Allocate a zeroed struct hci_dev, set up its sysfs representation and
 * the driver init queue. Returns NULL on allocation failure. The caller
 * owns the device and releases it via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
949
950/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). Drops the queued driver
 * init frames; the struct itself is freed by the embedded device's release
 * callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
959
/* Deferred power-on handler (hdev->power_on work).
 *
 * Opens the device; if that fails nothing else happens. While HCI_AUTO_OFF
 * is set, arms the delayed power-off so an unused device shuts itself down
 * again. Clearing HCI_SETUP announces the controller to the mgmt interface
 * exactly once, at the end of initial setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
976
/* Deferred auto-power-off handler (hdev->power_off delayed work).
 * Clears HCI_AUTO_OFF (the shutdown is happening now, nothing left to
 * cancel) and closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
988
/* Delayed-work handler that ends a timed discoverable period: drops back
 * to page-scan only (no inquiry scan) and resets the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1006
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001007int hci_uuids_clear(struct hci_dev *hdev)
1008{
1009 struct list_head *p, *n;
1010
1011 list_for_each_safe(p, n, &hdev->uuids) {
1012 struct bt_uuid *uuid;
1013
1014 uuid = list_entry(p, struct bt_uuid, list);
1015
1016 list_del(p);
1017 kfree(uuid);
1018 }
1019
1020 return 0;
1021}
1022
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001023int hci_link_keys_clear(struct hci_dev *hdev)
1024{
1025 struct list_head *p, *n;
1026
1027 list_for_each_safe(p, n, &hdev->link_keys) {
1028 struct link_key *key;
1029
1030 key = list_entry(p, struct link_key, list);
1031
1032 list_del(p);
1033 kfree(key);
1034 }
1035
1036 return 0;
1037}
1038
1039struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1040{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001041 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001042
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001043 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001044 if (bacmp(bdaddr, &k->bdaddr) == 0)
1045 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001046
1047 return NULL;
1048}
1049
/* Decide whether a newly created link key should be stored persistently.
 *
 * @key_type/@old_key_type are HCI link key types (0xff meaning "no
 * previous key"). Returns 1 to keep the key, 0 to discard it after use.
 * The checks below are ordered; each early return short-circuits the rest.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1085
/* Find the SMP Long Term Key matching the given EDIV and 8-byte Rand.
 *
 * LTKs are kept in the common link_keys list with type HCI_LK_SMP_LTK and
 * a struct key_master_id payload in k->data; entries with the wrong type
 * or payload size are skipped. Returns NULL if no match is found.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		/* Both ediv values are little-endian, so compare directly. */
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1108
1109struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1110 bdaddr_t *bdaddr, u8 type)
1111{
1112 struct link_key *k;
1113
1114 list_for_each_entry(k, &hdev->link_keys, list)
1115 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1116 return k;
1117
1118 return NULL;
1119}
1120EXPORT_SYMBOL(hci_find_link_key_type);
1121
/* Store (or update) the link key for @bdaddr.
 *
 * @conn may be NULL (security mode 3 style pairing). @new_key is non-zero
 * when the controller reported a fresh key (as opposed to a key loaded by
 * userspace); only then is mgmt notified and the persistence policy
 * applied. @val must point to the 16-byte key value.
 *
 * Returns 0 on success, -ENOMEM if a new list entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;	/* update in place */
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the previous key's type on record. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys were only needed for the mgmt event; the
	 * controller keeps the live copy for the current connection. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1176
/* Store (or update) an SMP Long Term Key for @bdaddr.
 *
 * The LTK lives in the common link_keys list as a HCI_LK_SMP_LTK entry
 * whose trailing data holds a struct key_master_id (ediv + rand). The
 * encryption key size is stashed in pin_len. @new_key non-zero triggers a
 * mgmt notification.
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;	/* overwrite existing LTK */
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;	/* no previous key */
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): hci_add_link_key() passes the 'persistent' flag as
	 * the third argument of mgmt_new_link_key(); here old_key_type is
	 * passed instead — confirm this is intentional. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1214
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001215int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1216{
1217 struct link_key *key;
1218
1219 key = hci_find_link_key(hdev, bdaddr);
1220 if (!key)
1221 return -ENOENT;
1222
1223 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1224
1225 list_del(&key->list);
1226 kfree(key);
1227
1228 return 0;
1229}
1230
/* HCI command timer function: fires when the controller failed to answer
 * the outstanding command in time. Resets the command credit to 1 so the
 * command work can make progress again with the next queued command.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1240
Szymon Janc2763eda2011-03-22 13:12:22 +01001241struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1242 bdaddr_t *bdaddr)
1243{
1244 struct oob_data *data;
1245
1246 list_for_each_entry(data, &hdev->remote_oob_data, list)
1247 if (bacmp(bdaddr, &data->bdaddr) == 0)
1248 return data;
1249
1250 return NULL;
1251}
1252
1253int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1254{
1255 struct oob_data *data;
1256
1257 data = hci_find_remote_oob_data(hdev, bdaddr);
1258 if (!data)
1259 return -ENOENT;
1260
1261 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1262
1263 list_del(&data->list);
1264 kfree(data);
1265
1266 return 0;
1267}
1268
1269int hci_remote_oob_data_clear(struct hci_dev *hdev)
1270{
1271 struct oob_data *data, *n;
1272
1273 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1274 list_del(&data->list);
1275 kfree(data);
1276 }
1277
1278 return 0;
1279}
1280
1281int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1282 u8 *randomizer)
1283{
1284 struct oob_data *data;
1285
1286 data = hci_find_remote_oob_data(hdev, bdaddr);
1287
1288 if (!data) {
1289 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1290 if (!data)
1291 return -ENOMEM;
1292
1293 bacpy(&data->bdaddr, bdaddr);
1294 list_add(&data->list, &hdev->remote_oob_data);
1295 }
1296
1297 memcpy(data->hash, hash, sizeof(data->hash));
1298 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1299
1300 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1301
1302 return 0;
1303}
1304
Antti Julkub2a66aa2011-06-15 12:01:14 +03001305struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1306 bdaddr_t *bdaddr)
1307{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001308 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001309
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001310 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001311 if (bacmp(bdaddr, &b->bdaddr) == 0)
1312 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001313
1314 return NULL;
1315}
1316
1317int hci_blacklist_clear(struct hci_dev *hdev)
1318{
1319 struct list_head *p, *n;
1320
1321 list_for_each_safe(p, n, &hdev->blacklist) {
1322 struct bdaddr_list *b;
1323
1324 b = list_entry(p, struct bdaddr_list, list);
1325
1326 list_del(p);
1327 kfree(b);
1328 }
1329
1330 return 0;
1331}
1332
1333int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334{
1335 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001336
1337 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1338 return -EBADF;
1339
Antti Julku5e762442011-08-25 16:48:02 +03001340 if (hci_blacklist_lookup(hdev, bdaddr))
1341 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001342
1343 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001344 if (!entry)
1345 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001346
1347 bacpy(&entry->bdaddr, bdaddr);
1348
1349 list_add(&entry->list, &hdev->blacklist);
1350
Johan Hedberg744cf192011-11-08 20:40:14 +02001351 return mgmt_device_blocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001352}
1353
1354int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355{
1356 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001357
Szymon Janc1ec918c2011-11-16 09:32:21 +01001358 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001359 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001360
1361 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001362 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001363 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001364
1365 list_del(&entry->list);
1366 kfree(entry);
1367
Johan Hedberg744cf192011-11-08 20:40:14 +02001368 return mgmt_device_unblocked(hdev, bdaddr);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001369}
1370
/* Delayed-work handler (hdev->adv_work) that expires the LE advertising
 * cache under the device lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1382
Andre Guedes76c86862011-05-26 16:23:50 -03001383int hci_adv_entries_clear(struct hci_dev *hdev)
1384{
1385 struct adv_entry *entry, *tmp;
1386
1387 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1388 list_del(&entry->list);
1389 kfree(entry);
1390 }
1391
1392 BT_DBG("%s adv cache cleared", hdev->name);
1393
1394 return 0;
1395}
1396
1397struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1398{
1399 struct adv_entry *entry;
1400
1401 list_for_each_entry(entry, &hdev->adv_entries, list)
1402 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1403 return entry;
1404
1405 return NULL;
1406}
1407
1408static inline int is_connectable_adv(u8 evt_type)
1409{
1410 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1411 return 1;
1412
1413 return 0;
1414}
1415
/* Cache the sender of an LE advertising report.
 *
 * Only connectable advertisements are cached; duplicates are ignored.
 * Returns 0 on success or duplicate, -EINVAL for non-connectable event
 * types, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1443
/* Register HCI device */
/* Takes a driver-allocated hdev (see hci_alloc_dev), assigns the first
 * free index, initializes all per-device state (work items, queues,
 * timers, lists), creates the workqueue/sysfs/rfkill attachments and
 * schedules the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative errno;
 * on failure the device is unlinked again and the caller keeps ownership.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* The driver must supply these callbacks. */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' so the list stays sorted by id. */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded, high-priority queue for rx/tx/cmd. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort; the device works without it. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1571
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up, in reverse: unlink
 * from the global list, close the device, free reassembly buffers, tell
 * mgmt, detach rfkill/sysfs, stop pending work, drain the per-device
 * state lists and drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal if the device finished setup and is
	 * not mid-initialization. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1623
/* Suspend HCI device */
/* Broadcast the suspend notification to HCI listeners. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1631
/* Resume HCI device */
/* Broadcast the resume notification to HCI listeners. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1639
/* Receive frame from HCI drivers */
/* Takes ownership of @skb (skb->dev must point at the hci_dev). Frames
 * arriving while the device is neither up nor initializing are dropped
 * with -ENXIO; otherwise the frame is queued for the rx work. Returns 0
 * on success.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1662
/* Incrementally reassemble one HCI packet from a driver byte stream.
 *
 * @type selects the packet kind (ACL/SCO/event), @index the per-device
 * reassembly slot (hdev->reassembly[index]). Up to @count bytes from
 * @data are consumed; once the packet header arrives its length field
 * fixes how many payload bytes are still expected (scb->expect).
 *
 * Returns the number of input bytes NOT consumed (>= 0) — a complete
 * frame may finish before @count is exhausted — or a negative errno
 * (-EILSEQ for bad type/index, -ENOMEM on allocation or oversized
 * payload). A completed frame is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a max-sized skb and
		 * expect the fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than the bytes still owed on this packet. */
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Read the payload length from it. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1771
Marcel Holtmannef222012007-07-11 06:42:04 +02001772int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1773{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301774 int rem = 0;
1775
Marcel Holtmannef222012007-07-11 06:42:04 +02001776 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1777 return -EILSEQ;
1778
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001779 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001780 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301781 if (rem < 0)
1782 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001783
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301784 data += (count - rem);
1785 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001786 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001787
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301788 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001789}
1790EXPORT_SYMBOL(hci_recv_fragment);
1791
Suraj Sumangala99811512010-07-14 13:02:19 +05301792#define STREAM_REASSEMBLY 0
1793
/* Reassemble HCI packets from a raw byte stream (e.g. a UART-style
 * transport): the first byte of every frame carries the packet type,
 * the remainder is fed to hci_reassembly() via the dedicated stream
 * slot.  Returns bytes left unconsumed or a negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: leading byte is the
			 * packet type indicator */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past whatever the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
1825EXPORT_SYMBOL(hci_recv_stream_fragment);
1826
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827/* ---- Interface to upper protocols ---- */
1828
/* Register an upper-layer protocol callback structure on the global
 * hci_cb_list.  Always succeeds and returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1839EXPORT_SYMBOL(hci_register_cb);
1840
/* Remove a previously registered callback structure from the global
 * hci_cb_list.  Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
1851EXPORT_SYMBOL(hci_unregister_cb);
1852
/* Hand one complete packet to the driver's send routine.  Frees @skb
 * and returns -ENODEV when it carries no device; in promiscuous mode
 * a timestamped copy is first delivered to the HCI sockets. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1876
1877/* Send HCI command */
/* Build an HCI command packet (header plus optional parameters) and
 * queue it on cmd_q for hci_cmd_work().
 *
 * @opcode: command opcode (host order; converted to LE in the header)
 * @plen:   parameter length in bytes
 * @param:  parameter payload, copied into the skb when @plen > 0
 *
 * Returns 0 on success or -ENOMEM on allocation failure. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912
1913/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001914void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915{
1916 struct hci_command_hdr *hdr;
1917
1918 if (!hdev->sent_cmd)
1919 return NULL;
1920
1921 hdr = (void *) hdev->sent_cmd->data;
1922
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001923 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 return NULL;
1925
Marcel Holtmanna9de9242007-10-20 13:33:56 +02001926 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
1928 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1929}
1930
1931/* Send ACL data */
/* Prepend an ACL header to @skb: packed handle+flags plus the data
 * length of the payload currently in the skb, both little-endian. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
1943
/* Queue a (possibly fragmented) ACL skb on @queue.  The head skb must
 * already carry its ACL header (added by the caller); continuation
 * fragments from frag_list get an ACL_CONT header added here.  All
 * fragments are queued under the queue lock so they stay contiguous. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
1984
/* Send ACL data on @chan: stamp the skb with the device and packet
 * type, prepend the ACL header, queue it on the channel's data queue
 * and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2000EXPORT_SYMBOL(hci_send_acl);
2001
2002/* Send SCO data */
/* Send SCO data on @conn: prepend the SCO header (handle + length),
 * queue the skb on the connection's data queue and kick the TX work. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2023EXPORT_SYMBOL(hci_send_sco);
2024
2025/* ---- HCI TX task (outgoing data) ---- */
2026
2027/* HCI Connection scheduler */
/* Pick the connection of @type that has data queued and the fewest
 * packets outstanding, and compute its fair share (*quote) of the free
 * controller buffers for that link type.  Sets *quote = 0 and returns
 * NULL when nothing is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip other link types and empty queues */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Remember the connection with the least in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2086
/* TX timeout handler: disconnect every connection of @type that still
 * has unacked packets outstanding (reason 0x13, remote user ended). */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2107
/* Channel scheduler: among all channels of @type with data queued,
 * pick one whose head skb has the highest priority, breaking ties by
 * the owning connection's lowest sent count.  Computes the channel's
 * share (*quote) of the free controller buffers.  Returns NULL when
 * nothing is ready. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority is considered */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the search at
			 * this priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest packets in flight */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share, but always allow at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2186
/* Anti-starvation pass: for every channel of @type that sent nothing
 * in the last scheduling round (chan->sent == 0) but has data queued,
 * promote its head skb to HCI_PRIO_MAX - 1 so it gets picked next
 * time.  Channels that did send have their counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2236
/* ACL scheduler: detect stalled links, then repeatedly pick the best
 * channel via hci_chan_sent() and send up to its quota of packets,
 * stopping within a channel when its head skb's priority drops.  Runs
 * a priority recalculation pass if anything was sent. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the buffer count to detect whether we sent anything */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2286
2287/* Schedule SCO */
/* SCO scheduler: round-robin over SCO connections via hci_low_sent()
 * and send up to each connection's quota of queued packets. */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the sent counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2310
/* eSCO scheduler: same logic as hci_sched_sco() but for ESCO_LINK
 * connections (shares the sco_cnt buffer pool). */
static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			/* Wrap the sent counter instead of overflowing */
			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
2333
/* LE scheduler: like hci_sched_acl() but for LE links.  Uses the LE
 * buffer pool when the controller advertises one (le_pkts), otherwise
 * falls back to sharing the ACL buffer pool. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember the starting count to detect whether we sent anything */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2384
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002385static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002387 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 struct sk_buff *skb;
2389
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002390 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2391 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002392
2393 /* Schedule queues and send stuff to HCI driver */
2394
2395 hci_sched_acl(hdev);
2396
2397 hci_sched_sco(hdev);
2398
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002399 hci_sched_esco(hdev);
2400
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002401 hci_sched_le(hdev);
2402
Linus Torvalds1da177e2005-04-16 15:20:36 -07002403 /* Send next queued raw (unknown type) packet */
2404 while ((skb = skb_dequeue(&hdev->raw_q)))
2405 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002406}
2407
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002408/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
2410/* ACL data packet */
/* Process an incoming ACL data packet: strip the ACL header, look up
 * the connection by handle and pass the payload to L2CAP.  Frees the
 * skb when the handle is unknown. */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2444
2445/* SCO data packet */
/* Process an incoming SCO data packet: strip the SCO header, look up
 * the connection by handle and pass the payload to the SCO layer.
 * Frees the skb when the handle is unknown. */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2475
/* RX work handler: drain rx_q, mirroring packets to promiscuous
 * sockets, discarding everything in raw mode and data packets during
 * init, then dispatching by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace owns the device, drop everything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events still fall through to the dispatch below */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2527
/* Command work handler: when the controller has a free command slot
 * (cmd_cnt), dequeue the next command, keep a clone in sent_cmd for
 * hci_sent_cmd_data(), send it and (re)arm the command timeout.  On
 * clone failure the command is requeued and the work rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No response is expected for HCI Reset, so do
			 * not arm the timeout for it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002558
2559int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2560{
2561 /* General inquiry access code (GIAC) */
2562 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2563 struct hci_cp_inquiry cp;
2564
2565 BT_DBG("%s", hdev->name);
2566
2567 if (test_bit(HCI_INQUIRY, &hdev->flags))
2568 return -EINPROGRESS;
2569
2570 memset(&cp, 0, sizeof(cp));
2571 memcpy(&cp.lap, lap, sizeof(cp.lap));
2572 cp.length = length;
2573
2574 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2575}
Andre Guedes023d50492011-11-04 14:16:52 -03002576
/* Cancel a running inquiry.  Returns the result of hci_send_cmd(),
 * or -EPERM when no inquiry is in progress. */
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EPERM;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
Andrei Emeltchenko7784d782011-11-18 13:35:42 +02002586
2587module_param(enable_hs, bool, 0644);
2588MODULE_PARM_DESC(enable_hs, "Enable High Speed");