/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

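/* The dut_mode entry exposes Device Under Test mode as a boolean:
 * reading reports the current state, writing 'Y' sends
 * HCI_OP_ENABLE_DUT_MODE to the controller and writing 'N' resets it.
 * The device must be up, since the toggle talks to the hardware.
 */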
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

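/* The read-only entries below all follow the same seq_file pattern:
 * a *_show() callback dumps the state under hci_dev_lock() and a
 * *_open() helper binds it to the hci_dev stashed in i_private.
 */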
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

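/* Simple numeric entries use DEFINE_SIMPLE_ATTRIBUTE(): a get() and an
 * optional set() callback operating on a u64, with the format string
 * controlling how the value is printed.
 */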
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

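/* force_sc_support is only writable while the device is down;
 * sc_only_mode is a read-only view of the HCI_SC_ONLY flag.
 */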
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

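/* The timing entries below sanity-check the new value before updating
 * hdev; an out-of-range write fails with -EINVAL.
 */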
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

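/* conn_info_min_age and conn_info_max_age bound the validity window
 * for cached connection information (such as RSSI); the two setters
 * enforce min <= max against each other.
 */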
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

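/* Completion callback for synchronous requests: stash the result in
 * hdev and wake whoever is sleeping on req_wait_q.
 */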
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

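/* Consume hdev->recv_evt and return it if it carries the event (or the
 * Command Complete for the given opcode) that the caller is waiting
 * for; otherwise free it and return ERR_PTR(-ENODATA).
 */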
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

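/* Send a single HCI command and sleep until the matching event arrives
 * or the timeout expires; the returned skb (the event payload) must be
 * freed by the caller.
 */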
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

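/* Pick the best inquiry mode the controller supports: 0x02 for inquiry
 * with extended results, 0x01 for inquiry with RSSI, 0x00 otherwise.
 * A few controllers that misreport their capabilities are pinned to
 * RSSI-only mode by manufacturer and revision.
 */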
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

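/* Build the default link policy from whatever the controller's LMP
 * features advertise (role switch, hold, sniff, park) and program it
 * with Write Default Link Policy Settings.
 */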
Johan Hedberg2177bab2013-03-05 20:37:43 +02001514{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001515 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001516 struct hci_cp_write_def_link_policy cp;
1517 u16 link_policy = 0;
1518
1519 if (lmp_rswitch_capable(hdev))
1520 link_policy |= HCI_LP_RSWITCH;
1521 if (lmp_hold_capable(hdev))
1522 link_policy |= HCI_LP_HOLD;
1523 if (lmp_sniff_capable(hdev))
1524 link_policy |= HCI_LP_SNIFF;
1525 if (lmp_park_capable(hdev))
1526 link_policy |= HCI_LP_PARK;
1527
1528 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001529 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001530}
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001534 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001535 struct hci_cp_write_le_host_supported cp;
1536
Johan Hedbergc73eee92013-04-19 18:35:21 +03001537 /* LE-only devices do not support explicit enablement */
1538 if (!lmp_bredr_capable(hdev))
1539 return;
1540
Johan Hedberg2177bab2013-03-05 20:37:43 +02001541 memset(&cp, 0, sizeof(cp));
1542
1543 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1544 cp.le = 0x01;
1545 cp.simul = lmp_le_br_capable(hdev);
1546 }
1547
1548 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001549 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1550 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001551}
1552
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001553static void hci_set_event_mask_page_2(struct hci_request *req)
1554{
1555 struct hci_dev *hdev = req->hdev;
1556 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1557
1558 /* If Connectionless Slave Broadcast master role is supported
1559 * enable all necessary events for it.
1560 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001561 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001562 events[1] |= 0x40; /* Triggered Clock Capture */
1563 events[1] |= 0x80; /* Synchronization Train Complete */
1564 events[2] |= 0x10; /* Slave Page Response Timeout */
1565 events[2] |= 0x20; /* CSB Channel Map Change */
1566 }
1567
1568 /* If Connectionless Slave Broadcast slave role is supported
1569 * enable all necessary events for it.
1570 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001571 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001572 events[2] |= 0x01; /* Synchronization Train Received */
1573 events[2] |= 0x02; /* CSB Receive */
1574 events[2] |= 0x04; /* CSB Timeout */
1575 events[2] |= 0x08; /* Truncated Page Complete */
1576 }
1577
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001578 /* Enable Authenticated Payload Timeout Expired event if supported */
1579 if (lmp_ping_capable(hdev))
1580 events[2] |= 0x80;
1581
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001582 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1583}
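
/* A short aside (not from the original file): the events[] array above is
 * the little-endian event mask from the specification, so event bit N maps
 * to byte N / 8, bit N % 8. A minimal sketch of that mapping:
 */
static inline void example_set_event_mask_bit(u8 events[8], unsigned int nr)
{
	/* e.g. nr == 14 gives events[1] |= 0x40, reproducing the
	 * Triggered Clock Capture line above
	 */
	events[nr / 8] |= 1 << (nr % 8);
}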
1584
Johan Hedberg42c6b122013-03-05 20:37:49 +02001585static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001586{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001587 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001588 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001590	/* Some Broadcom-based Bluetooth controllers do not support the
 1591	 * Delete Stored Link Key command. They clearly indicate its
 1592	 * absence in the bit mask of supported commands.
 1593	 *
 1594	 * Check the supported commands and send the command only if it
 1595	 * is marked as supported. If it is not, assume that the controller
 1596	 * does not have actual support for stored link keys, which makes
 1597	 * this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001598	 *
 1599	 * Some controllers indicate that they support handling the deletion
 1600	 * of stored link keys, but they don't. The quirk lets a driver
 1601	 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001602	 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001603 if (hdev->commands[6] & 0x80 &&
1604 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001605 struct hci_cp_delete_stored_link_key cp;
1606
1607 bacpy(&cp.bdaddr, BDADDR_ANY);
1608 cp.delete_all = 0x01;
1609 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1610 sizeof(cp), &cp);
1611 }
1612
Johan Hedberg2177bab2013-03-05 20:37:43 +02001613 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001614 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001615
Johan Hedberg7bf32042014-02-23 19:42:29 +02001616 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001617 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001618
1619 /* Read features beyond page 1 if available */
1620 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1621 struct hci_cp_read_local_ext_features cp;
1622
1623 cp.page = p;
1624 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1625 sizeof(cp), &cp);
1626 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001627}
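
/* Context note (illustrative, not in the original file): hdev->commands[]
 * mirrors the Read Local Supported Commands response, one bit per HCI
 * command at spec-defined octet/bit positions. The commands[6] & 0x80
 * test above is octet 6, bit 7 (Delete Stored Link Key), and
 * commands[5] & 0x10 is octet 5, bit 4. A generic form of the check:
 */
static inline bool example_hci_cmd_supported(struct hci_dev *hdev,
					     unsigned int octet,
					     unsigned int bit)
{
	return (hdev->commands[octet] & (1 << bit)) != 0;
}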
1628
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001629static void hci_init4_req(struct hci_request *req, unsigned long opt)
1630{
1631 struct hci_dev *hdev = req->hdev;
1632
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001633 /* Set event mask page 2 if the HCI command for it is supported */
1634 if (hdev->commands[22] & 0x04)
1635 hci_set_event_mask_page_2(req);
1636
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001637 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001638 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001639 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001640
1641 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001642 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001643 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001644 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1645 u8 support = 0x01;
1646 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1647 sizeof(support), &support);
1648 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649}
1650
Johan Hedberg2177bab2013-03-05 20:37:43 +02001651static int __hci_init(struct hci_dev *hdev)
1652{
1653 int err;
1654
1655 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1656 if (err < 0)
1657 return err;
1658
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001659 /* The Device Under Test (DUT) mode is special and available for
1660 * all controller types. So just create it early on.
1661 */
1662 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1663 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1664 &dut_mode_fops);
1665 }
1666
Johan Hedberg2177bab2013-03-05 20:37:43 +02001667	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
 1668	 * dual-mode BR/EDR/LE type controllers. AMP controllers only need
 1669	 * the first stage init.
1670 */
1671 if (hdev->dev_type != HCI_BREDR)
1672 return 0;
1673
1674 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1675 if (err < 0)
1676 return err;
1677
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001678 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1679 if (err < 0)
1680 return err;
1681
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001682 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1683 if (err < 0)
1684 return err;
1685
1686 /* Only create debugfs entries during the initial setup
1687 * phase and not every time the controller gets powered on.
1688 */
1689 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1690 return 0;
1691
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001692 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1693 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001694 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1695 &hdev->manufacturer);
1696 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1697 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001698 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1699 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001700 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1701
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001702 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1703 &conn_info_min_age_fops);
1704 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1705 &conn_info_max_age_fops);
1706
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001707 if (lmp_bredr_capable(hdev)) {
1708 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1709 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001710 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1711 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001712 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1713 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001714 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1715 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001716 }
1717
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001718 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001719 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1720 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001721 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1722 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001723 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1724 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001725 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001726
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001727 if (lmp_sniff_capable(hdev)) {
1728 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1729 hdev, &idle_timeout_fops);
1730 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1731 hdev, &sniff_min_interval_fops);
1732 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1733 hdev, &sniff_max_interval_fops);
1734 }
1735
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001736 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001737 debugfs_create_file("identity", 0400, hdev->debugfs,
1738 hdev, &identity_fops);
1739 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1740 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001741 debugfs_create_file("random_address", 0444, hdev->debugfs,
1742 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001743 debugfs_create_file("static_address", 0444, hdev->debugfs,
1744 hdev, &static_address_fops);
1745
1746 /* For controllers with a public address, provide a debug
1747 * option to force the usage of the configured static
1748 * address. By default the public address is used.
1749 */
1750 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1751 debugfs_create_file("force_static_address", 0644,
1752 hdev->debugfs, hdev,
1753 &force_static_address_fops);
1754
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001755 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1756 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001757 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1758 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001759 debugfs_create_file("identity_resolving_keys", 0400,
1760 hdev->debugfs, hdev,
1761 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001762 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1763 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001764 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1765 hdev, &conn_min_interval_fops);
1766 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1767 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001768 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1769 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001770 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1771 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001772 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1773 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001774 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1775 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001776 debugfs_create_u16("discov_interleaved_timeout", 0644,
1777 hdev->debugfs,
1778 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001779 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001780
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001781 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001782}
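
/* Sketch for context (hypothetical names, not part of this file): the
 * custom *_fops referenced above are defined earlier in this file; a
 * read-only entry of the same shape can be built with
 * DEFINE_SIMPLE_ATTRIBUTE like this.
 */
static int example_foo_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	*val = hdev->manufacturer;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(example_foo_fops, example_foo_get, NULL, "%llu\n");

/* ... and wired up next to the calls above:
 *	debugfs_create_file("example_foo", 0444, hdev->debugfs, hdev,
 *			    &example_foo_fops);
 */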
1783
Johan Hedberg42c6b122013-03-05 20:37:49 +02001784static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785{
1786 __u8 scan = opt;
1787
Johan Hedberg42c6b122013-03-05 20:37:49 +02001788 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
1790 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001791 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792}
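
/* Usage note (illustrative): the opt byte is the Write Scan Enable
 * parameter. Assuming the SCAN_* constants from hci.h, callers typically
 * pass one of:
 *
 *	SCAN_DISABLED			neither scan
 *	SCAN_INQUIRY			inquiry scan only
 *	SCAN_PAGE			page scan only
 *	SCAN_INQUIRY | SCAN_PAGE	discoverable and connectable
 *
 * e.g. hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
 *		     HCI_INIT_TIMEOUT);
 */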
1793
Johan Hedberg42c6b122013-03-05 20:37:49 +02001794static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795{
1796 __u8 auth = opt;
1797
Johan Hedberg42c6b122013-03-05 20:37:49 +02001798 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001799
1800 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001801 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001802}
1803
Johan Hedberg42c6b122013-03-05 20:37:49 +02001804static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805{
1806 __u8 encrypt = opt;
1807
Johan Hedberg42c6b122013-03-05 20:37:49 +02001808 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001810 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001811 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812}
1813
Johan Hedberg42c6b122013-03-05 20:37:49 +02001814static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001815{
1816 __le16 policy = cpu_to_le16(opt);
1817
Johan Hedberg42c6b122013-03-05 20:37:49 +02001818 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001819
1820 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001821 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001822}
1823
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001824/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001825 * Device is held on return. */
1826struct hci_dev *hci_dev_get(int index)
1827{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001828 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829
1830 BT_DBG("%d", index);
1831
1832 if (index < 0)
1833 return NULL;
1834
1835 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001836 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 if (d->id == index) {
1838 hdev = hci_dev_hold(d);
1839 break;
1840 }
1841 }
1842 read_unlock(&hci_dev_list_lock);
1843 return hdev;
1844}
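
/* Usage note (illustrative): each successful hci_dev_get() takes a
 * reference that must be dropped with hci_dev_put() once the caller is
 * done, e.g.:
 *
 *	struct hci_dev *hdev = hci_dev_get(index);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */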
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
1846/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001847
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001848bool hci_discovery_active(struct hci_dev *hdev)
1849{
1850 struct discovery_state *discov = &hdev->discovery;
1851
Andre Guedes6fbe1952012-02-03 17:47:58 -03001852 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001853 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001854 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001855 return true;
1856
Andre Guedes6fbe1952012-02-03 17:47:58 -03001857 default:
1858 return false;
1859 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001860}
1861
Johan Hedbergff9ef572012-01-04 14:23:45 +02001862void hci_discovery_set_state(struct hci_dev *hdev, int state)
1863{
1864 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1865
1866 if (hdev->discovery.state == state)
1867 return;
1868
1869 switch (state) {
1870 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001871 hci_update_background_scan(hdev);
1872
Andre Guedes7b99b652012-02-13 15:41:02 -03001873 if (hdev->discovery.state != DISCOVERY_STARTING)
1874 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001875 break;
1876 case DISCOVERY_STARTING:
1877 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001878 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001879 mgmt_discovering(hdev, 1);
1880 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001881 case DISCOVERY_RESOLVING:
1882 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001883 case DISCOVERY_STOPPING:
1884 break;
1885 }
1886
1887 hdev->discovery.state = state;
1888}
1889
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001890void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891{
Johan Hedberg30883512012-01-04 14:16:21 +02001892 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001893 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894
Johan Hedberg561aafb2012-01-04 13:31:59 +02001895 list_for_each_entry_safe(p, n, &cache->all, all) {
1896 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001897 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001898 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001899
1900 INIT_LIST_HEAD(&cache->unknown);
1901 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902}
1903
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001904struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1905 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906{
Johan Hedberg30883512012-01-04 14:16:21 +02001907 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908 struct inquiry_entry *e;
1909
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001910 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911
Johan Hedberg561aafb2012-01-04 13:31:59 +02001912 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001914 return e;
1915 }
1916
1917 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918}
1919
Johan Hedberg561aafb2012-01-04 13:31:59 +02001920struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001921 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001922{
Johan Hedberg30883512012-01-04 14:16:21 +02001923 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001924 struct inquiry_entry *e;
1925
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001926 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001927
1928 list_for_each_entry(e, &cache->unknown, list) {
1929 if (!bacmp(&e->data.bdaddr, bdaddr))
1930 return e;
1931 }
1932
1933 return NULL;
1934}
1935
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001936struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001937 bdaddr_t *bdaddr,
1938 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001939{
1940 struct discovery_state *cache = &hdev->discovery;
1941 struct inquiry_entry *e;
1942
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001943 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001944
1945 list_for_each_entry(e, &cache->resolve, list) {
1946 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1947 return e;
1948 if (!bacmp(&e->data.bdaddr, bdaddr))
1949 return e;
1950 }
1951
1952 return NULL;
1953}
1954
Johan Hedberga3d4e202012-01-09 00:53:02 +02001955void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001956 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001957{
1958 struct discovery_state *cache = &hdev->discovery;
1959 struct list_head *pos = &cache->resolve;
1960 struct inquiry_entry *p;
1961
1962 list_del(&ie->list);
1963
1964 list_for_each_entry(p, &cache->resolve, list) {
1965 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001966 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001967 break;
1968 pos = &p->list;
1969 }
1970
1971 list_add(&ie->list, pos);
1972}
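
/* Worked example (not in the original file): the resolve list stays
 * ordered from strongest to weakest signal, i.e. by increasing abs(rssi).
 * With entries at RSSI -40, -60 and -75, re-inserting an entry at -65
 * walks past -40 and -60, stops at -75 (abs(-75) >= abs(-65)) and so
 * lands between -60 and -75.
 */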
1973
Marcel Holtmannaf589252014-07-01 14:11:20 +02001974u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1975 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976{
Johan Hedberg30883512012-01-04 14:16:21 +02001977 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001978 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02001979 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001981 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982
Szymon Janc2b2fec42012-11-20 11:38:54 +01001983 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1984
Marcel Holtmannaf589252014-07-01 14:11:20 +02001985 if (!data->ssp_mode)
1986 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001987
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001988 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001989 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02001990 if (!ie->data.ssp_mode)
1991 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001992
Johan Hedberga3d4e202012-01-09 00:53:02 +02001993 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001994 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001995 ie->data.rssi = data->rssi;
1996 hci_inquiry_cache_update_resolve(hdev, ie);
1997 }
1998
Johan Hedberg561aafb2012-01-04 13:31:59 +02001999 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002000 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002001
Johan Hedberg561aafb2012-01-04 13:31:59 +02002002 /* Entry not in the cache. Add new one. */
2003 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002004 if (!ie) {
2005 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2006 goto done;
2007 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002008
2009 list_add(&ie->all, &cache->all);
2010
2011 if (name_known) {
2012 ie->name_state = NAME_KNOWN;
2013 } else {
2014 ie->name_state = NAME_NOT_KNOWN;
2015 list_add(&ie->list, &cache->unknown);
2016 }
2017
2018update:
2019 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002020 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002021 ie->name_state = NAME_KNOWN;
2022 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002023 }
2024
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002025 memcpy(&ie->data, data, sizeof(*data));
2026 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002028
2029 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002030 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002031
Marcel Holtmannaf589252014-07-01 14:11:20 +02002032done:
2033 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034}
2035
2036static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2037{
Johan Hedberg30883512012-01-04 14:16:21 +02002038 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 struct inquiry_info *info = (struct inquiry_info *) buf;
2040 struct inquiry_entry *e;
2041 int copied = 0;
2042
Johan Hedberg561aafb2012-01-04 13:31:59 +02002043 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002045
2046 if (copied >= num)
2047 break;
2048
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 bacpy(&info->bdaddr, &data->bdaddr);
2050 info->pscan_rep_mode = data->pscan_rep_mode;
2051 info->pscan_period_mode = data->pscan_period_mode;
2052 info->pscan_mode = data->pscan_mode;
2053 memcpy(info->dev_class, data->dev_class, 3);
2054 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002057 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 }
2059
2060 BT_DBG("cache %p, copied %d", cache, copied);
2061 return copied;
2062}
2063
Johan Hedberg42c6b122013-03-05 20:37:49 +02002064static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065{
2066 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002067 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002068 struct hci_cp_inquiry cp;
2069
2070 BT_DBG("%s", hdev->name);
2071
2072 if (test_bit(HCI_INQUIRY, &hdev->flags))
2073 return;
2074
2075 /* Start Inquiry */
2076 memcpy(&cp.lap, &ir->lap, 3);
2077 cp.length = ir->length;
2078 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002079 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080}
2081
Andre Guedes3e13fa12013-03-27 20:04:56 -03002082static int wait_inquiry(void *word)
2083{
2084 schedule();
2085 return signal_pending(current);
2086}
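
/* Sketch of how this action function is consumed (mirroring the real call
 * in hci_inquiry() below): wait_on_bit() re-invokes wait_inquiry() after
 * every wakeup until HCI_INQUIRY is cleared, and a non-zero return, i.e.
 * a pending signal, aborts the wait:
 *
 *	if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
 *			TASK_INTERRUPTIBLE))
 *		return -EINTR;
 */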
2087
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088int hci_inquiry(void __user *arg)
2089{
2090 __u8 __user *ptr = arg;
2091 struct hci_inquiry_req ir;
2092 struct hci_dev *hdev;
2093 int err = 0, do_inquiry = 0, max_rsp;
2094 long timeo;
2095 __u8 *buf;
2096
2097 if (copy_from_user(&ir, ptr, sizeof(ir)))
2098 return -EFAULT;
2099
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002100 hdev = hci_dev_get(ir.dev_id);
2101 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 return -ENODEV;
2103
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002104 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2105 err = -EBUSY;
2106 goto done;
2107 }
2108
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002109 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2110 err = -EOPNOTSUPP;
2111 goto done;
2112 }
2113
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002114 if (hdev->dev_type != HCI_BREDR) {
2115 err = -EOPNOTSUPP;
2116 goto done;
2117 }
2118
Johan Hedberg56f87902013-10-02 13:43:13 +03002119 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002124 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002125 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002126 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002127 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128 do_inquiry = 1;
2129 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002130 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Marcel Holtmann04837f62006-07-03 10:02:33 +02002132 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002133
2134 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002135 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2136 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002137 if (err < 0)
2138 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002139
2140 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2141 * cleared). If it is interrupted by a signal, return -EINTR.
2142 */
2143 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2144 TASK_INTERRUPTIBLE))
2145 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002146 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002148	/* For an unlimited number of responses we use a buffer with
 2149	 * 255 entries.
2150 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2152
 2153	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2154	 * and then copy it to user space.
2155 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002156 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002157 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 err = -ENOMEM;
2159 goto done;
2160 }
2161
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002162 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002164 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 BT_DBG("num_rsp %d", ir.num_rsp);
2167
2168 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2169 ptr += sizeof(ir);
2170 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002171 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002173 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 err = -EFAULT;
2175
2176 kfree(buf);
2177
2178done:
2179 hci_dev_put(hdev);
2180 return err;
2181}
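
/* Userspace view, for context: hci_inquiry() backs the HCIINQUIRY ioctl
 * on a raw HCI socket. A hedged sketch of a caller (error handling
 * omitted; 0x9e8b33 is the General Inquiry Access Code):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(sock, HCIINQUIRY, &buf);
 */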
2182
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002183static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 int ret = 0;
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 BT_DBG("%s %p", hdev->name, hdev);
2188
2189 hci_req_lock(hdev);
2190
Johan Hovold94324962012-03-15 14:48:41 +01002191 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2192 ret = -ENODEV;
2193 goto done;
2194 }
2195
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002196 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2197 /* Check for rfkill but allow the HCI setup stage to
2198 * proceed (which in itself doesn't cause any RF activity).
2199 */
2200 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2201 ret = -ERFKILL;
2202 goto done;
2203 }
2204
 2205	/* Check for a valid public address or a configured static
 2206	 * random address, but let the HCI setup proceed to
2207 * be able to determine if there is a public address
2208 * or not.
2209 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002210 * In case of user channel usage, it is not important
2211 * if a public address or static random address is
2212 * available.
2213 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002214 * This check is only valid for BR/EDR controllers
2215 * since AMP controllers do not have an address.
2216 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002217 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2218 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002219 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2220 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2221 ret = -EADDRNOTAVAIL;
2222 goto done;
2223 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002224 }
2225
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 if (test_bit(HCI_UP, &hdev->flags)) {
2227 ret = -EALREADY;
2228 goto done;
2229 }
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 if (hdev->open(hdev)) {
2232 ret = -EIO;
2233 goto done;
2234 }
2235
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002236 atomic_set(&hdev->cmd_cnt, 1);
2237 set_bit(HCI_INIT, &hdev->flags);
2238
2239 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2240 ret = hdev->setup(hdev);
2241
2242 if (!ret) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002243 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002244 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002245 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246 }
2247
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002248 clear_bit(HCI_INIT, &hdev->flags);
2249
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 if (!ret) {
2251 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002252 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 set_bit(HCI_UP, &hdev->flags);
2254 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002255 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002256 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002257 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002258 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002259 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002260 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002261 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002262 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002264 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002265 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002266 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267
2268 skb_queue_purge(&hdev->cmd_q);
2269 skb_queue_purge(&hdev->rx_q);
2270
2271 if (hdev->flush)
2272 hdev->flush(hdev);
2273
2274 if (hdev->sent_cmd) {
2275 kfree_skb(hdev->sent_cmd);
2276 hdev->sent_cmd = NULL;
2277 }
2278
2279 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002280 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 }
2282
2283done:
2284 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 return ret;
2286}
2287
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002288/* ---- HCI ioctl helpers ---- */
2289
2290int hci_dev_open(__u16 dev)
2291{
2292 struct hci_dev *hdev;
2293 int err;
2294
2295 hdev = hci_dev_get(dev);
2296 if (!hdev)
2297 return -ENODEV;
2298
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002299 /* Devices that are marked for raw-only usage can only be powered
 2300	 * up as a user channel. Trying to bring them up as normal devices
 2301	 * will result in a failure. Only user channel operation is
2302 * possible.
2303 *
2304 * When this function is called for a user channel, the flag
2305 * HCI_USER_CHANNEL will be set first before attempting to
2306 * open the device.
2307 */
2308 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2309 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2310 err = -EOPNOTSUPP;
2311 goto done;
2312 }
2313
Johan Hedberge1d08f42013-10-01 22:44:50 +03002314 /* We need to ensure that no other power on/off work is pending
2315 * before proceeding to call hci_dev_do_open. This is
2316 * particularly important if the setup procedure has not yet
2317 * completed.
2318 */
2319 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2320 cancel_delayed_work(&hdev->power_off);
2321
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002322 /* After this call it is guaranteed that the setup procedure
2323 * has finished. This means that error conditions like RFKILL
2324 * or no valid public or static random address apply.
2325 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002326 flush_workqueue(hdev->req_workqueue);
2327
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002328 err = hci_dev_do_open(hdev);
2329
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002330done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002331 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002332 return err;
2333}
2334
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335static int hci_dev_do_close(struct hci_dev *hdev)
2336{
2337 BT_DBG("%s %p", hdev->name, hdev);
2338
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002339 cancel_delayed_work(&hdev->power_off);
2340
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341 hci_req_cancel(hdev, ENODEV);
2342 hci_req_lock(hdev);
2343
2344 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002345 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 hci_req_unlock(hdev);
2347 return 0;
2348 }
2349
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002350 /* Flush RX and TX works */
2351 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002352 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002354 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002355 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002356 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002357 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002358 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002359 }
2360
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002361 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002362 cancel_delayed_work(&hdev->service_cache);
2363
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002364 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002365
2366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2367 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002368
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002369 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002370 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002372 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002373 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374
2375 hci_notify(hdev, HCI_DEV_DOWN);
2376
2377 if (hdev->flush)
2378 hdev->flush(hdev);
2379
2380 /* Reset device */
2381 skb_queue_purge(&hdev->cmd_q);
2382 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002383 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002384 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002385 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002386 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002387 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388 clear_bit(HCI_INIT, &hdev->flags);
2389 }
2390
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002391 /* flush cmd work */
2392 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
2394 /* Drop queues */
2395 skb_queue_purge(&hdev->rx_q);
2396 skb_queue_purge(&hdev->cmd_q);
2397 skb_queue_purge(&hdev->raw_q);
2398
2399 /* Drop last sent command */
2400 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002401 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402 kfree_skb(hdev->sent_cmd);
2403 hdev->sent_cmd = NULL;
2404 }
2405
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002406 kfree_skb(hdev->recv_evt);
2407 hdev->recv_evt = NULL;
2408
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 /* After this point our queues are empty
2410 * and no tasks are scheduled. */
2411 hdev->close(hdev);
2412
Johan Hedberg35b973c2013-03-15 17:06:59 -05002413 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002414 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002415 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2416
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002417 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2418 if (hdev->dev_type == HCI_BREDR) {
2419 hci_dev_lock(hdev);
2420 mgmt_powered(hdev, 0);
2421 hci_dev_unlock(hdev);
2422 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002423 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002424
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002425 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002426 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002427
Johan Hedberge59fda82012-02-22 18:11:53 +02002428 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002429 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002430 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002431
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 hci_req_unlock(hdev);
2433
2434 hci_dev_put(hdev);
2435 return 0;
2436}
2437
2438int hci_dev_close(__u16 dev)
2439{
2440 struct hci_dev *hdev;
2441 int err;
2442
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002443 hdev = hci_dev_get(dev);
2444 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002446
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002447 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2448 err = -EBUSY;
2449 goto done;
2450 }
2451
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002452 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2453 cancel_delayed_work(&hdev->power_off);
2454
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002456
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002457done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 hci_dev_put(hdev);
2459 return err;
2460}
2461
2462int hci_dev_reset(__u16 dev)
2463{
2464 struct hci_dev *hdev;
2465 int ret = 0;
2466
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002467 hdev = hci_dev_get(dev);
2468 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469 return -ENODEV;
2470
2471 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472
Marcel Holtmann808a0492013-08-26 20:57:58 -07002473 if (!test_bit(HCI_UP, &hdev->flags)) {
2474 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002476 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002478 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479 ret = -EBUSY;
2480 goto done;
2481 }
2482
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002483 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2484 ret = -EOPNOTSUPP;
2485 goto done;
2486 }
2487
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 /* Drop queues */
2489 skb_queue_purge(&hdev->rx_q);
2490 skb_queue_purge(&hdev->cmd_q);
2491
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002492 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002493 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002495 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496
2497 if (hdev->flush)
2498 hdev->flush(hdev);
2499
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002500 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002501 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002503 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
2505done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002506 hci_req_unlock(hdev);
2507 hci_dev_put(hdev);
2508 return ret;
2509}
2510
2511int hci_dev_reset_stat(__u16 dev)
2512{
2513 struct hci_dev *hdev;
2514 int ret = 0;
2515
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002516 hdev = hci_dev_get(dev);
2517 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 return -ENODEV;
2519
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002520 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2521 ret = -EBUSY;
2522 goto done;
2523 }
2524
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002525 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2526 ret = -EOPNOTSUPP;
2527 goto done;
2528 }
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2531
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002532done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 return ret;
2535}
2536
2537int hci_dev_cmd(unsigned int cmd, void __user *arg)
2538{
2539 struct hci_dev *hdev;
2540 struct hci_dev_req dr;
2541 int err = 0;
2542
2543 if (copy_from_user(&dr, arg, sizeof(dr)))
2544 return -EFAULT;
2545
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002546 hdev = hci_dev_get(dr.dev_id);
2547 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 return -ENODEV;
2549
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002550 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2551 err = -EBUSY;
2552 goto done;
2553 }
2554
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002555 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2556 err = -EOPNOTSUPP;
2557 goto done;
2558 }
2559
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002560 if (hdev->dev_type != HCI_BREDR) {
2561 err = -EOPNOTSUPP;
2562 goto done;
2563 }
2564
Johan Hedberg56f87902013-10-02 13:43:13 +03002565 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2566 err = -EOPNOTSUPP;
2567 goto done;
2568 }
2569
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 switch (cmd) {
2571 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002572 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2573 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002574 break;
2575
2576 case HCISETENCRYPT:
2577 if (!lmp_encrypt_capable(hdev)) {
2578 err = -EOPNOTSUPP;
2579 break;
2580 }
2581
2582 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2583 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002584 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2585 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 if (err)
2587 break;
2588 }
2589
Johan Hedberg01178cd2013-03-05 20:37:41 +02002590 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2591 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 break;
2593
2594 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002595 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2596 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 break;
2598
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002599 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002600 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2601 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002602 break;
2603
2604 case HCISETLINKMODE:
2605 hdev->link_mode = ((__u16) dr.dev_opt) &
2606 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2607 break;
2608
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609 case HCISETPTYPE:
2610 hdev->pkt_type = (__u16) dr.dev_opt;
2611 break;
2612
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002614 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2615 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 break;
2617
2618 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002619 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2620 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002621 break;
2622
2623 default:
2624 err = -EINVAL;
2625 break;
2626 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002627
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002628done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 hci_dev_put(hdev);
2630 return err;
2631}
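
/* Note on the MTU handlers above (illustrative, endianness dependent):
 * dev_opt is reinterpreted as two native-endian __u16 values, index 0
 * being the packet count and index 1 the MTU. On a little-endian host a
 * caller would therefore pack, e.g. for HCISETACLMTU:
 *
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 */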
2632
2633int hci_get_dev_list(void __user *arg)
2634{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002635 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 struct hci_dev_list_req *dl;
2637 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 int n = 0, size, err;
2639 __u16 dev_num;
2640
2641 if (get_user(dev_num, (__u16 __user *) arg))
2642 return -EFAULT;
2643
2644 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2645 return -EINVAL;
2646
2647 size = sizeof(*dl) + dev_num * sizeof(*dr);
2648
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002649 dl = kzalloc(size, GFP_KERNEL);
2650 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 return -ENOMEM;
2652
2653 dr = dl->dev_req;
2654
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002655 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002656 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002657 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002658 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002659
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002660 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2661 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002662
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 (dr + n)->dev_id = hdev->id;
2664 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002665
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 if (++n >= dev_num)
2667 break;
2668 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002669 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
2671 dl->dev_num = n;
2672 size = sizeof(*dl) + n * sizeof(*dr);
2673
2674 err = copy_to_user(arg, dl, size);
2675 kfree(dl);
2676
2677 return err ? -EFAULT : 0;
2678}
2679
2680int hci_get_dev_info(void __user *arg)
2681{
2682 struct hci_dev *hdev;
2683 struct hci_dev_info di;
2684 int err = 0;
2685
2686 if (copy_from_user(&di, arg, sizeof(di)))
2687 return -EFAULT;
2688
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002689 hdev = hci_dev_get(di.dev_id);
2690 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 return -ENODEV;
2692
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002693 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002694 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002695
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002696 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2697 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002698
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 strcpy(di.name, hdev->name);
2700 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002701 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 di.flags = hdev->flags;
2703 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002704 if (lmp_bredr_capable(hdev)) {
2705 di.acl_mtu = hdev->acl_mtu;
2706 di.acl_pkts = hdev->acl_pkts;
2707 di.sco_mtu = hdev->sco_mtu;
2708 di.sco_pkts = hdev->sco_pkts;
2709 } else {
2710 di.acl_mtu = hdev->le_mtu;
2711 di.acl_pkts = hdev->le_pkts;
2712 di.sco_mtu = 0;
2713 di.sco_pkts = 0;
2714 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 di.link_policy = hdev->link_policy;
2716 di.link_mode = hdev->link_mode;
2717
2718 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2719 memcpy(&di.features, &hdev->features, sizeof(di.features));
2720
2721 if (copy_to_user(arg, &di, sizeof(di)))
2722 err = -EFAULT;
2723
2724 hci_dev_put(hdev);
2725
2726 return err;
2727}
2728
2729/* ---- Interface to HCI drivers ---- */
2730
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002731static int hci_rfkill_set_block(void *data, bool blocked)
2732{
2733 struct hci_dev *hdev = data;
2734
2735 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2736
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002737 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2738 return -EBUSY;
2739
Johan Hedberg5e130362013-09-13 08:58:17 +03002740 if (blocked) {
2741 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002742 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2743 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002744 } else {
2745 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002746 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002747
2748 return 0;
2749}
2750
2751static const struct rfkill_ops hci_rfkill_ops = {
2752 .set_block = hci_rfkill_set_block,
2753};
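
/* These ops get bound to the controller when it is registered (done by
 * hci_register_dev() later in this file); a condensed sketch of that
 * hookup, with error handling trimmed:
 *
 *	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
 *				    RFKILL_TYPE_BLUETOOTH,
 *				    &hci_rfkill_ops, hdev);
 *	if (hdev->rfkill && rfkill_register(hdev->rfkill) < 0) {
 *		rfkill_destroy(hdev->rfkill);
 *		hdev->rfkill = NULL;
 *	}
 */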
2754
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002755static void hci_power_on(struct work_struct *work)
2756{
2757 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002758 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002759
2760 BT_DBG("%s", hdev->name);
2761
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002762 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002763 if (err < 0) {
2764 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002765 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002766 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002767
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002768 /* During the HCI setup phase, a few error conditions are
2769 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

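/* Editorial example (not part of the original source): hci_persistent_key()
 * decides whether a new link key outlives the current connection. A legacy
 * combination key (type 0x00) is always stored, a debug combination key
 * never is, and an unauthenticated combination key is stored when neither
 * side asked for no-bonding or when either side required dedicated bonding.
 * A caller checks it roughly like this:
 *
 *	bool store = hci_persistent_key(hdev, conn, key->type, old_key_type);
 */
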
static bool ltk_type_master(u8 type)
{
	return (type == SMP_LTK);
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

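/* Editorial note: the lookup above resolves a Resolvable Private Address in
 * two passes -- first a cheap bacmp() against the RPA cached for each IRK,
 * then the AES-128 computation in smp_irk_matches(); a positive match is
 * cached in irk->rpa so the next lookup takes the fast path. A sketch of
 * the intended use (conn is an assumed caller-side variable):
 *
 *	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk)
 *		bacpy(&conn->dst, &irk->bdaddr);  // switch to identity address
 */
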
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

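/* Illustrative call (editorial sketch; the local variable names and the
 * SMP_LTK_SLAVE type are assumptions based on the SMP layer of this era):
 * storing a slave LTK distributed during pairing would look roughly like
 *
 *	hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK_SLAVE,
 *		    authenticated, tk, enc_size, ediv, rand);
 */
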
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}

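/* Editorial note: for random addresses the two most significant bits of the
 * most significant byte (b[5], since bdaddr_t is stored little endian)
 * encode the sub-type: 11 = static, 01 = resolvable private, 00 =
 * non-resolvable private. So a random address beginning with 0xC1:...
 * counts as an identity address above, while one beginning with 0x41:...
 * (resolvable private) does not.
 */
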
/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");

	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

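/* Illustrative use (editorial sketch; cp and addr_type are assumed
 * caller-side names): the management code would arm background
 * reconnection for a device with hdev->lock held, as required above.
 *
 *	hci_dev_lock(hdev);
 *	err = hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
 *				  HCI_AUTO_CONN_ALWAYS);
 *	hci_dev_unlock(hdev);
 */
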
/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	hci_pend_le_conns_clear(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

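/* Illustrative use (editorial sketch): request builders pick the own
 * address type through this helper before queuing advertising or scan
 * parameter commands, e.g.:
 *
 *	struct hci_cp_le_set_scan_param param_cp;
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	param_cp.own_address_type = own_addr_type;
 */
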
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage need to set
	 * the HCI_RAW flag to indicate that only the user channel is
	 * supported.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

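/* Editorial note: these two exported hooks merely broadcast
 * HCI_DEV_SUSPEND/HCI_DEV_RESUME to HCI sockets via hci_notify(). A
 * transport driver would typically call them from its own power management
 * callbacks; a hypothetical sketch:
 *
 *	static int xyz_suspend(struct device *dev)
 *	{
 *		struct xyz_data *data = dev_get_drvdata(dev);
 *
 *		return hci_suspend_dev(data->hdev);
 *	}
 */
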
/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

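/* Illustrative use (editorial sketch; rx_buf and rx_len are assumed driver
 * variables): a driver that receives an arbitrary-sized chunk of a single
 * packet type can pass it straight in; reassembly across calls is handled
 * by hci_reassembly() above.
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, rx_buf, rx_len);
 *	if (err < 0)
 *		BT_ERR("Corrupted HCI event fragment");
 */
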
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

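/* Editorial note: unlike hci_recv_fragment(), the stream variant expects
 * H:4-style framing where every packet starts with a one-byte packet type
 * indicator, so raw UART data can be fed in directly (rx_buf and rx_len
 * are assumed driver variables):
 *
 *	// rx_buf[0] is e.g. HCI_EVENT_PKT, followed by the event itself
 *	err = hci_recv_stream_fragment(hdev, rx_buf, rx_len);
 */
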
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229/* ---- Interface to upper protocols ---- */
4230
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231int hci_register_cb(struct hci_cb *cb)
4232{
4233 BT_DBG("%p name %s", cb, cb->name);
4234
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004235 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004237 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238
4239 return 0;
4240}
4241EXPORT_SYMBOL(hci_register_cb);
4242
4243int hci_unregister_cb(struct hci_cb *cb)
4244{
4245 BT_DBG("%p name %s", cb, cb->name);
4246
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004247 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004249 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250
4251 return 0;
4252}
4253EXPORT_SYMBOL(hci_unregister_cb);
4254
Marcel Holtmann51086992013-10-10 14:54:19 -07004255static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004257 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004259 /* Time stamp */
4260 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004261
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004262 /* Send copy to monitor */
4263 hci_send_to_monitor(hdev, skb);
4264
4265 if (atomic_read(&hdev->promisc)) {
4266 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004267 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 }
4269
4270 /* Get rid of skb owner, prior to sending to the driver. */
4271 skb_orphan(skb);
4272
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004273 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004274 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004275}
4276
Johan Hedberg3119ae92013-03-05 20:37:44 +02004277void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4278{
4279 skb_queue_head_init(&req->cmd_q);
4280 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004281 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004282}
4283
4284int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4285{
4286 struct hci_dev *hdev = req->hdev;
4287 struct sk_buff *skb;
4288 unsigned long flags;
4289
4290 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4291
Andre Guedes5d73e032013-03-08 11:20:16 -03004292	/* If an error occurred during request building, remove all HCI
4293 * commands queued on the HCI request queue.
4294 */
4295 if (req->err) {
4296 skb_queue_purge(&req->cmd_q);
4297 return req->err;
4298 }
4299
Johan Hedberg3119ae92013-03-05 20:37:44 +02004300 /* Do not allow empty requests */
4301 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004302 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004303
4304 skb = skb_peek_tail(&req->cmd_q);
4305 bt_cb(skb)->req.complete = complete;
4306
4307 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4308 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4309 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4310
4311 queue_work(hdev->workqueue, &hdev->cmd_work);
4312
4313 return 0;
4314}
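
/* Sketch of typical request usage (illustration only, not taken from
 * this file): callers batch commands and run them with a completion
 * callback matching hci_req_complete_t.
 *
 *	static void my_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * my_complete() and err are hypothetical names; the calls mirror how
 * hci_update_background_scan() below drives the same machinery.
 */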
4315
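/* Allocate and fill an skb carrying a single HCI command: a
 * hci_command_hdr (opcode + parameter length) followed by the optional
 * parameter payload, tagged as HCI_COMMAND_PKT.
 */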
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004316static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004317 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318{
4319 int len = HCI_COMMAND_HDR_SIZE + plen;
4320 struct hci_command_hdr *hdr;
4321 struct sk_buff *skb;
4322
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004324 if (!skb)
4325 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004326
4327 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004328 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329 hdr->plen = plen;
4330
4331 if (plen)
4332 memcpy(skb_put(skb, plen), param, plen);
4333
4334 BT_DBG("skb len %d", skb->len);
4335
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004336 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004337
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004338 return skb;
4339}
4340
4341/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004342int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4343 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004344{
4345 struct sk_buff *skb;
4346
4347 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4348
4349 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4350 if (!skb) {
4351 BT_ERR("%s no memory for command", hdev->name);
4352 return -ENOMEM;
4353 }
4354
Johan Hedberg11714b32013-03-05 20:37:47 +02004355	/* Stand-alone HCI commands must be flagged as
4356 * single-command requests.
4357 */
4358 bt_cb(skb)->req.start = true;
4359
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004361 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362
4363 return 0;
4364}
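
/* For illustration: a stand-alone command such as a controller reset
 * could be issued as hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); the
 * command is queued on cmd_q and sent from hci_cmd_work() below.
 */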
Linus Torvalds1da177e2005-04-16 15:20:36 -07004365
Johan Hedberg71c76a12013-03-05 20:37:46 +02004366/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004367void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4368 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004369{
4370 struct hci_dev *hdev = req->hdev;
4371 struct sk_buff *skb;
4372
4373 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4374
Andre Guedes34739c12013-03-08 11:20:18 -03004375	/* If an error occurred during request building, there is no point in
4376 * queueing the HCI command. We can simply return.
4377 */
4378 if (req->err)
4379 return;
4380
Johan Hedberg71c76a12013-03-05 20:37:46 +02004381 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4382 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004383 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4384 hdev->name, opcode);
4385 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004386 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004387 }
4388
4389 if (skb_queue_empty(&req->cmd_q))
4390 bt_cb(skb)->req.start = true;
4391
Johan Hedberg02350a72013-04-03 21:50:29 +03004392 bt_cb(skb)->req.event = event;
4393
Johan Hedberg71c76a12013-03-05 20:37:46 +02004394 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004395}
4396
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004397void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4398 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004399{
4400 hci_req_add_ev(req, opcode, plen, param, 0);
4401}
4402
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004404void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405{
4406 struct hci_command_hdr *hdr;
4407
4408 if (!hdev->sent_cmd)
4409 return NULL;
4410
4411 hdr = (void *) hdev->sent_cmd->data;
4412
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004413 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 return NULL;
4415
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004416 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417
4418 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4419}
4420
4421/* Send ACL data */
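/* hci_handle_pack() folds the 12-bit connection handle and the 4-bit
 * packet boundary/broadcast flags into the single 16-bit handle field
 * of the ACL header; hci_handle()/hci_flags() below split it again on
 * the receive path.
 */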
4422static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4423{
4424 struct hci_acl_hdr *hdr;
4425 int len = skb->len;
4426
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004427 skb_push(skb, HCI_ACL_HDR_SIZE);
4428 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004429 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004430 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4431 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432}
4433
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004434static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004435 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004437 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004438 struct hci_dev *hdev = conn->hdev;
4439 struct sk_buff *list;
4440
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004441 skb->len = skb_headlen(skb);
4442 skb->data_len = 0;
4443
4444 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004445
4446 switch (hdev->dev_type) {
4447 case HCI_BREDR:
4448 hci_add_acl_hdr(skb, conn->handle, flags);
4449 break;
4450 case HCI_AMP:
4451 hci_add_acl_hdr(skb, chan->handle, flags);
4452 break;
4453 default:
4454 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4455 return;
4456 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004457
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004458 list = skb_shinfo(skb)->frag_list;
4459 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004460		/* Non-fragmented */
4461 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4462
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004463 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004464 } else {
4465 /* Fragmented */
4466 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4467
4468 skb_shinfo(skb)->frag_list = NULL;
4469
4470 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004471 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004473 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004474
4475 flags &= ~ACL_START;
4476 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 do {
4478 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004479
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004480 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004481 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004482
4483 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4484
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004485 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486 } while (list);
4487
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004488 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004490}
4491
4492void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4493{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004494 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004495
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004496 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004497
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004498 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004500 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004501}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502
4503/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004504void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505{
4506 struct hci_dev *hdev = conn->hdev;
4507 struct hci_sco_hdr hdr;
4508
4509 BT_DBG("%s len %d", hdev->name, skb->len);
4510
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004511 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512 hdr.dlen = skb->len;
4513
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004514 skb_push(skb, HCI_SCO_HDR_SIZE);
4515 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004516 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004518 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004519
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004521 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004522}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
4524/* ---- HCI TX task (outgoing data) ---- */
4525
4526/* HCI Connection scheduler */
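/* Pick the connection of the given link type that has the fewest
 * packets outstanding (c->sent) and grant it a fair share of the
 * controller's free buffers: quote = available credits / number of
 * ready connections, with a minimum of one.
 */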
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004527static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4528 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529{
4530 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004531 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004532 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004533
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004534	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004535	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004536
4537 rcu_read_lock();
4538
4539 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004540 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004542
4543 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4544 continue;
4545
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 num++;
4547
4548 if (c->sent < min) {
4549 min = c->sent;
4550 conn = c;
4551 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004552
4553 if (hci_conn_num(hdev, type) == num)
4554 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004555 }
4556
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004557 rcu_read_unlock();
4558
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004560 int cnt, q;
4561
4562 switch (conn->type) {
4563 case ACL_LINK:
4564 cnt = hdev->acl_cnt;
4565 break;
4566 case SCO_LINK:
4567 case ESCO_LINK:
4568 cnt = hdev->sco_cnt;
4569 break;
4570 case LE_LINK:
4571 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4572 break;
4573 default:
4574 cnt = 0;
4575 BT_ERR("Unknown link type");
4576 }
4577
4578 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579 *quote = q ? q : 1;
4580 } else
4581 *quote = 0;
4582
4583 BT_DBG("conn %p quote %d", conn, *quote);
4584 return conn;
4585}
4586
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004587static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004588{
4589 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004590 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004591
Ville Tervobae1f5d92011-02-10 22:38:53 -03004592 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004593
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004594 rcu_read_lock();
4595
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004597 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004598 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004599 BT_ERR("%s killing stalled connection %pMR",
4600 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004601 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 }
4603 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004604
4605 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606}
4607
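/* Channel-level counterpart of hci_low_sent(): scan all channels of
 * connections of the given type, consider only the highest skb
 * priority currently queued, and among those pick the channel whose
 * connection has sent the least. The returned quote is again a fair
 * share of the available buffer credits.
 */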
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004608static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4609 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004610{
4611 struct hci_conn_hash *h = &hdev->conn_hash;
4612 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004613 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004614 struct hci_conn *conn;
4615 int cnt, q, conn_num = 0;
4616
4617 BT_DBG("%s", hdev->name);
4618
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004619 rcu_read_lock();
4620
4621 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004622 struct hci_chan *tmp;
4623
4624 if (conn->type != type)
4625 continue;
4626
4627 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4628 continue;
4629
4630 conn_num++;
4631
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004632 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004633 struct sk_buff *skb;
4634
4635 if (skb_queue_empty(&tmp->data_q))
4636 continue;
4637
4638 skb = skb_peek(&tmp->data_q);
4639 if (skb->priority < cur_prio)
4640 continue;
4641
4642 if (skb->priority > cur_prio) {
4643 num = 0;
4644 min = ~0;
4645 cur_prio = skb->priority;
4646 }
4647
4648 num++;
4649
4650 if (conn->sent < min) {
4651 min = conn->sent;
4652 chan = tmp;
4653 }
4654 }
4655
4656 if (hci_conn_num(hdev, type) == conn_num)
4657 break;
4658 }
4659
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004660 rcu_read_unlock();
4661
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004662 if (!chan)
4663 return NULL;
4664
4665 switch (chan->conn->type) {
4666 case ACL_LINK:
4667 cnt = hdev->acl_cnt;
4668 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004669 case AMP_LINK:
4670 cnt = hdev->block_cnt;
4671 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004672 case SCO_LINK:
4673 case ESCO_LINK:
4674 cnt = hdev->sco_cnt;
4675 break;
4676 case LE_LINK:
4677 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4678 break;
4679 default:
4680 cnt = 0;
4681 BT_ERR("Unknown link type");
4682 }
4683
4684 q = cnt / num;
4685 *quote = q ? q : 1;
4686 BT_DBG("chan %p quote %d", chan, *quote);
4687 return chan;
4688}
4689
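/* Anti-starvation pass run after a TX round: any channel that has data
 * queued but did not get to send promotes the skb at the head of its
 * queue to HCI_PRIO_MAX - 1, so lower-priority traffic is not starved
 * indefinitely by a busy high-priority channel.
 */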
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004690static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4691{
4692 struct hci_conn_hash *h = &hdev->conn_hash;
4693 struct hci_conn *conn;
4694 int num = 0;
4695
4696 BT_DBG("%s", hdev->name);
4697
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004698 rcu_read_lock();
4699
4700 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004701 struct hci_chan *chan;
4702
4703 if (conn->type != type)
4704 continue;
4705
4706 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4707 continue;
4708
4709 num++;
4710
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004711 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004712 struct sk_buff *skb;
4713
4714 if (chan->sent) {
4715 chan->sent = 0;
4716 continue;
4717 }
4718
4719 if (skb_queue_empty(&chan->data_q))
4720 continue;
4721
4722 skb = skb_peek(&chan->data_q);
4723 if (skb->priority >= HCI_PRIO_MAX - 1)
4724 continue;
4725
4726 skb->priority = HCI_PRIO_MAX - 1;
4727
4728 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004729 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004730 }
4731
4732 if (hci_conn_num(hdev, type) == num)
4733 break;
4734 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004735
4736 rcu_read_unlock();
4737
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004738}
4739
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004740static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4741{
4742 /* Calculate count of blocks used by this packet */
4743 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4744}
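
/* Example (hypothetical numbers): with hdev->block_len of 64 and a
 * 339-byte skb, the 4-byte ACL header is excluded and
 * DIV_ROUND_UP(335, 64) yields 6 blocks.
 */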
4745
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004746static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004747{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004748 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004749 /* ACL tx timeout must be longer than maximum
4750 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004751 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004752 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004753 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004754 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004755}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004757static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004758{
4759 unsigned int cnt = hdev->acl_cnt;
4760 struct hci_chan *chan;
4761 struct sk_buff *skb;
4762 int quote;
4763
4764 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004765
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004766 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004767 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004768 u32 priority = (skb_peek(&chan->data_q))->priority;
4769 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004770 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004771 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004772
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004773 /* Stop if priority has changed */
4774 if (skb->priority < priority)
4775 break;
4776
4777 skb = skb_dequeue(&chan->data_q);
4778
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004779 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004780 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004781
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004782 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783 hdev->acl_last_tx = jiffies;
4784
4785 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004786 chan->sent++;
4787 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004788 }
4789 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004790
4791 if (cnt != hdev->acl_cnt)
4792 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793}
4794
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004795static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004796{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004797 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004798 struct hci_chan *chan;
4799 struct sk_buff *skb;
4800 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004801 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004802
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004803 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004804
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004805 BT_DBG("%s", hdev->name);
4806
4807 if (hdev->dev_type == HCI_AMP)
4808 type = AMP_LINK;
4809 else
4810 type = ACL_LINK;
4811
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004812 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004813 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004814 u32 priority = (skb_peek(&chan->data_q))->priority;
4815 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4816 int blocks;
4817
4818 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004819 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004820
4821 /* Stop if priority has changed */
4822 if (skb->priority < priority)
4823 break;
4824
4825 skb = skb_dequeue(&chan->data_q);
4826
4827 blocks = __get_blocks(hdev, skb);
4828 if (blocks > hdev->block_cnt)
4829 return;
4830
4831 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004832 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004833
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004834 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004835 hdev->acl_last_tx = jiffies;
4836
4837 hdev->block_cnt -= blocks;
4838 quote -= blocks;
4839
4840 chan->sent += blocks;
4841 chan->conn->sent += blocks;
4842 }
4843 }
4844
4845 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004846 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004847}
4848
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004849static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004850{
4851 BT_DBG("%s", hdev->name);
4852
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004853 /* No ACL link over BR/EDR controller */
4854 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4855 return;
4856
4857 /* No AMP link over AMP controller */
4858 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004859 return;
4860
4861 switch (hdev->flow_ctl_mode) {
4862 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4863 hci_sched_acl_pkt(hdev);
4864 break;
4865
4866 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4867 hci_sched_acl_blk(hdev);
4868 break;
4869 }
4870}
4871
Linus Torvalds1da177e2005-04-16 15:20:36 -07004872/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004873static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874{
4875 struct hci_conn *conn;
4876 struct sk_buff *skb;
4877 int quote;
4878
4879 BT_DBG("%s", hdev->name);
4880
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004881 if (!hci_conn_num(hdev, SCO_LINK))
4882 return;
4883
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4885 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4886 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004887 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888
4889 conn->sent++;
4890 if (conn->sent == ~0)
4891 conn->sent = 0;
4892 }
4893 }
4894}
4895
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004896static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004897{
4898 struct hci_conn *conn;
4899 struct sk_buff *skb;
4900 int quote;
4901
4902 BT_DBG("%s", hdev->name);
4903
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004904 if (!hci_conn_num(hdev, ESCO_LINK))
4905 return;
4906
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004907 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4908 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004909 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4910 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004911 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004912
4913 conn->sent++;
4914 if (conn->sent == ~0)
4915 conn->sent = 0;
4916 }
4917 }
4918}
4919
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004920static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004921{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004922 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004923 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004924 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004925
4926 BT_DBG("%s", hdev->name);
4927
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004928 if (!hci_conn_num(hdev, LE_LINK))
4929 return;
4930
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004931 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004932 /* LE tx timeout must be longer than maximum
4933 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004934 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004935 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004936 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004937 }
4938
4939 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004940 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004941 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004942 u32 priority = (skb_peek(&chan->data_q))->priority;
4943 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004944 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004945 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004946
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004947 /* Stop if priority has changed */
4948 if (skb->priority < priority)
4949 break;
4950
4951 skb = skb_dequeue(&chan->data_q);
4952
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004953 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004954 hdev->le_last_tx = jiffies;
4955
4956 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004957 chan->sent++;
4958 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004959 }
4960 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004961
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004962 if (hdev->le_pkts)
4963 hdev->le_cnt = cnt;
4964 else
4965 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004966
4967 if (cnt != tmp)
4968 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004969}
4970
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004971static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004973 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974 struct sk_buff *skb;
4975
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004977 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978
Marcel Holtmann52de5992013-09-03 18:08:38 -07004979 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4980 /* Schedule queues and send stuff to HCI driver */
4981 hci_sched_acl(hdev);
4982 hci_sched_sco(hdev);
4983 hci_sched_esco(hdev);
4984 hci_sched_le(hdev);
4985 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004986
Linus Torvalds1da177e2005-04-16 15:20:36 -07004987 /* Send next queued raw (unknown type) packet */
4988 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004989 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990}
4991
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004992/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993
4994/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004995static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004996{
4997 struct hci_acl_hdr *hdr = (void *) skb->data;
4998 struct hci_conn *conn;
4999 __u16 handle, flags;
5000
5001 skb_pull(skb, HCI_ACL_HDR_SIZE);
5002
5003 handle = __le16_to_cpu(hdr->handle);
5004 flags = hci_flags(handle);
5005 handle = hci_handle(handle);
5006
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005007 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005008 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009
5010 hdev->stat.acl_rx++;
5011
5012 hci_dev_lock(hdev);
5013 conn = hci_conn_hash_lookup_handle(hdev, handle);
5014 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005015
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005017 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005018
Linus Torvalds1da177e2005-04-16 15:20:36 -07005019 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005020 l2cap_recv_acldata(conn, skb, flags);
5021 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005023 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005024 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 }
5026
5027 kfree_skb(skb);
5028}
5029
5030/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005031static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032{
5033 struct hci_sco_hdr *hdr = (void *) skb->data;
5034 struct hci_conn *conn;
5035 __u16 handle;
5036
5037 skb_pull(skb, HCI_SCO_HDR_SIZE);
5038
5039 handle = __le16_to_cpu(hdr->handle);
5040
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005041 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005042
5043 hdev->stat.sco_rx++;
5044
5045 hci_dev_lock(hdev);
5046 conn = hci_conn_hash_lookup_handle(hdev, handle);
5047 hci_dev_unlock(hdev);
5048
5049 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005050 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005051 sco_recv_scodata(conn, skb);
5052 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005053 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005054 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005055 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 }
5057
5058 kfree_skb(skb);
5059}
5060
Johan Hedberg9238f362013-03-05 20:37:48 +02005061static bool hci_req_is_complete(struct hci_dev *hdev)
5062{
5063 struct sk_buff *skb;
5064
5065 skb = skb_peek(&hdev->cmd_q);
5066 if (!skb)
5067 return true;
5068
5069 return bt_cb(skb)->req.start;
5070}
5071
Johan Hedberg42c6b122013-03-05 20:37:49 +02005072static void hci_resend_last(struct hci_dev *hdev)
5073{
5074 struct hci_command_hdr *sent;
5075 struct sk_buff *skb;
5076 u16 opcode;
5077
5078 if (!hdev->sent_cmd)
5079 return;
5080
5081 sent = (void *) hdev->sent_cmd->data;
5082 opcode = __le16_to_cpu(sent->opcode);
5083 if (opcode == HCI_OP_RESET)
5084 return;
5085
5086 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5087 if (!skb)
5088 return;
5089
5090 skb_queue_head(&hdev->cmd_q, skb);
5091 queue_work(hdev->workqueue, &hdev->cmd_work);
5092}
5093
Johan Hedberg9238f362013-03-05 20:37:48 +02005094void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5095{
5096 hci_req_complete_t req_complete = NULL;
5097 struct sk_buff *skb;
5098 unsigned long flags;
5099
5100 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5101
Johan Hedberg42c6b122013-03-05 20:37:49 +02005102 /* If the completed command doesn't match the last one that was
5103	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005104 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005105 if (!hci_sent_cmd_data(hdev, opcode)) {
5106 /* Some CSR based controllers generate a spontaneous
5107 * reset complete event during init and any pending
5108 * command will never be completed. In such a case we
5109 * need to resend whatever was the last sent
5110 * command.
5111 */
5112 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5113 hci_resend_last(hdev);
5114
Johan Hedberg9238f362013-03-05 20:37:48 +02005115 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005116 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005117
5118	/* If the command succeeded and there are still more commands in
5119	 * this request, the request is not yet complete.
5120 */
5121 if (!status && !hci_req_is_complete(hdev))
5122 return;
5123
5124	/* If this was the last command in a request, the complete
5125 * callback would be found in hdev->sent_cmd instead of the
5126 * command queue (hdev->cmd_q).
5127 */
5128 if (hdev->sent_cmd) {
5129 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005130
5131 if (req_complete) {
5132 /* We must set the complete callback to NULL to
5133 * avoid calling the callback more than once if
5134 * this function gets called again.
5135 */
5136 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5137
Johan Hedberg9238f362013-03-05 20:37:48 +02005138 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005139 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005140 }
5141
5142 /* Remove all pending commands belonging to this request */
5143 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5144 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5145 if (bt_cb(skb)->req.start) {
5146 __skb_queue_head(&hdev->cmd_q, skb);
5147 break;
5148 }
5149
5150 req_complete = bt_cb(skb)->req.complete;
5151 kfree_skb(skb);
5152 }
5153 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5154
5155call_complete:
5156 if (req_complete)
5157 req_complete(hdev, status);
5158}
5159
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005160static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005162 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163 struct sk_buff *skb;
5164
5165 BT_DBG("%s", hdev->name);
5166
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005168 /* Send copy to monitor */
5169 hci_send_to_monitor(hdev, skb);
5170
Linus Torvalds1da177e2005-04-16 15:20:36 -07005171 if (atomic_read(&hdev->promisc)) {
5172 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005173 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 }
5175
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005176 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005177 kfree_skb(skb);
5178 continue;
5179 }
5180
5181 if (test_bit(HCI_INIT, &hdev->flags)) {
5182			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005183 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 case HCI_ACLDATA_PKT:
5185 case HCI_SCODATA_PKT:
5186 kfree_skb(skb);
5187 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189 }
5190
5191 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005192 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005194 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195 hci_event_packet(hdev, skb);
5196 break;
5197
5198 case HCI_ACLDATA_PKT:
5199 BT_DBG("%s ACL data packet", hdev->name);
5200 hci_acldata_packet(hdev, skb);
5201 break;
5202
5203 case HCI_SCODATA_PKT:
5204 BT_DBG("%s SCO data packet", hdev->name);
5205 hci_scodata_packet(hdev, skb);
5206 break;
5207
5208 default:
5209 kfree_skb(skb);
5210 break;
5211 }
5212 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213}
5214
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005215static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005217 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 struct sk_buff *skb;
5219
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005220 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5221 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005222
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005224 if (atomic_read(&hdev->cmd_cnt)) {
5225 skb = skb_dequeue(&hdev->cmd_q);
5226 if (!skb)
5227 return;
5228
Wei Yongjun7585b972009-02-25 18:29:52 +08005229 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005231 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005232 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005234 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005235 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005236 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005237 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005238 schedule_delayed_work(&hdev->cmd_timer,
5239 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005240 } else {
5241 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005242 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243 }
5244 }
5245}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005246
5247void hci_req_add_le_scan_disable(struct hci_request *req)
5248{
5249 struct hci_cp_le_set_scan_enable cp;
5250
5251 memset(&cp, 0, sizeof(cp));
5252 cp.enable = LE_SCAN_DISABLE;
5253 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5254}
Andre Guedesa4790db2014-02-26 20:21:47 -03005255
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005256void hci_req_add_le_passive_scan(struct hci_request *req)
5257{
5258 struct hci_cp_le_set_scan_param param_cp;
5259 struct hci_cp_le_set_scan_enable enable_cp;
5260 struct hci_dev *hdev = req->hdev;
5261 u8 own_addr_type;
5262
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005263	/* Set require_privacy to false since no SCAN_REQ are sent
5264 * during passive scanning. Not using an unresolvable address
5265 * here is important so that peer devices using direct
5266 * advertising with our address will be correctly reported
5267 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005268 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005269 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005270 return;
5271
5272 memset(&param_cp, 0, sizeof(param_cp));
5273 param_cp.type = LE_SCAN_PASSIVE;
5274 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5275 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5276 param_cp.own_address_type = own_addr_type;
5277 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5278 &param_cp);
5279
5280 memset(&enable_cp, 0, sizeof(enable_cp));
5281 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005282 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005283 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5284 &enable_cp);
5285}
5286
Andre Guedesa4790db2014-02-26 20:21:47 -03005287static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5288{
5289 if (status)
5290 BT_DBG("HCI request failed to update background scanning: "
5291 "status 0x%2.2x", status);
5292}
5293
5294/* This function controls the background scanning based on hdev->pend_le_conns
5295 * list. If there are pending LE connections, we start the background scanning,
5296 * otherwise we stop it.
5297 *
5298 * This function requires the caller holds hdev->lock.
5299 */
5300void hci_update_background_scan(struct hci_dev *hdev)
5301{
Andre Guedesa4790db2014-02-26 20:21:47 -03005302 struct hci_request req;
5303 struct hci_conn *conn;
5304 int err;
5305
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005306 if (!test_bit(HCI_UP, &hdev->flags) ||
5307 test_bit(HCI_INIT, &hdev->flags) ||
5308 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5309 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005310 return;
5311
Andre Guedesa4790db2014-02-26 20:21:47 -03005312 hci_req_init(&req, hdev);
5313
5314 if (list_empty(&hdev->pend_le_conns)) {
5315		/* If there are no pending LE connections, we should stop
5316 * the background scanning.
5317 */
5318
5319 /* If controller is not scanning we are done. */
5320 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5321 return;
5322
5323 hci_req_add_le_scan_disable(&req);
5324
5325 BT_DBG("%s stopping background scanning", hdev->name);
5326 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005327 /* If there is at least one pending LE connection, we should
5328 * keep the background scan running.
5329 */
5330
Andre Guedesa4790db2014-02-26 20:21:47 -03005331 /* If controller is connecting, we should not start scanning
5332 * since some controllers are not able to scan and connect at
5333 * the same time.
5334 */
5335 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5336 if (conn)
5337 return;
5338
Andre Guedes4340a122014-03-10 18:26:24 -03005339 /* If controller is currently scanning, we stop it to ensure we
5340		 * don't miss any advertising (due to the duplicates filter).
5341 */
5342 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5343 hci_req_add_le_scan_disable(&req);
5344
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005345 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005346
5347 BT_DBG("%s starting background scanning", hdev->name);
5348 }
5349
5350 err = hci_req_run(&req, update_background_scan_complete);
5351 if (err)
5352 BT_ERR("Failed to run HCI request: err %d", err);
5353}