/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

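/* The dut_mode entry exposes the controller's Device Under Test mode:
 * reading returns 'Y' or 'N', and writing a boolean string toggles it by
 * sending HCI_OP_ENABLE_DUT_MODE or HCI_OP_RESET while the device is up.
 * A typical interaction (assuming debugfs is mounted at /sys/kernel/debug)
 * would be:
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */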
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open = simple_open,
        .read = dut_mode_read,
        .write = dut_mode_write,
        .llseek = default_llseek,
};

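/* Most of the read-only entries below follow the same seq_file pattern:
 * a *_show() callback prints the state under hci_dev_lock(), a *_open()
 * helper binds it to the inode's private hci_dev, and single_release()
 * tears it down. features_show() dumps every supported feature page,
 * plus the LE feature page on LE capable controllers.
 */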
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open = features_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open = blacklist_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open = uuids_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open = inquiry_cache_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open = link_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open = dev_class_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

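/* The numeric entries below use DEFINE_SIMPLE_ATTRIBUTE() instead of a
 * hand-rolled seq_file: a get()/set() pair plus a printf format is enough
 * to generate the complete file_operations. voice_setting is read-only
 * (the set() callback is NULL), while the later attributes also accept
 * writes and validate the new value before storing it.
 */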
static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

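/* force_sc_support toggles the HCI_FORCE_SC debug flag, which makes the
 * core treat the controller as Secure Connections capable for testing
 * purposes. Writes are only allowed while the device is down (-EBUSY
 * otherwise). sc_only_mode below is its read-only companion and reports
 * whether Secure Connections Only mode is active.
 */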
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open = simple_open,
        .read = force_sc_support_read,
        .write = force_sc_support_write,
        .llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open = simple_open,
        .read = sc_only_mode_read,
        .llseek = default_llseek,
};

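/* idle_timeout is in milliseconds; 0 disables the timer and any other
 * value must fall between 500 ms and one hour (3600000 ms). rpa_timeout
 * below is in seconds and controls how often the Resolvable Private
 * Address gets rotated.
 */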
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

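/* conn_info_min_age and conn_info_max_age bound how long cached
 * connection information (such as RSSI) stays valid before it has to be
 * refreshed from the controller. The two setters enforce the invariant
 * min_age <= max_age against each other, so neither value can cross the
 * other.
 */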
static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open = identity_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open = random_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open = static_address_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

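/* force_static_address makes the core prefer the configured static
 * random address over the public address when the device is brought up.
 * Like force_sc_support, it is a debug flag and can only be flipped
 * while the device is down.
 */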
static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open = simple_open,
        .read = force_static_address_read,
        .write = force_static_address_write,
        .llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open = white_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open = identity_resolving_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open = long_term_keys_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

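/* The LE connection parameters below are kept in the units used on the
 * wire: connection intervals in 1.25 ms steps (0x0006-0x0c80), slave
 * latency as a number of connection events (up to 0x01f3), and the
 * supervision timeout in 10 ms steps (0x000a-0x0c80). The range checks
 * in the setters mirror the limits defined by the Bluetooth Core
 * Specification.
 */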
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open = device_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

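/* hci_get_cmd_complete() consumes the last event received from the
 * controller (hdev->recv_evt) and checks that it is the one the caller
 * waited for: either a specific event code, or a Command Complete event
 * whose opcode matches the issued command. On any mismatch the skb is
 * freed and -ENODATA is returned.
 */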
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

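/* __hci_cmd_sync_ev() and __hci_cmd_sync() send a single HCI command and
 * sleep until the matching event (by default the Command Complete) has
 * arrived, returning the event skb to the caller. Callers are expected
 * to serialize via hci_req_lock(), as dut_mode_write() above does. A
 * typical call looks like:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 */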
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

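/* Controller initialization is split into numbered stages (hci_init1_req
 * and friends below). Each stage is run through __hci_req_sync(), and
 * later stages use the results of earlier ones - for example, the event
 * mask and extended feature reads in stages 2 and 3 depend on the local
 * features and version read in stage 1.
 */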
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

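/* hci_get_inquiry_mode() picks the best inquiry result format the
 * controller can deliver: 0x02 for Inquiry Result with Extended Inquiry
 * Response, 0x01 for Inquiry Result with RSSI, and 0x00 for the standard
 * format. The manufacturer/revision checks whitelist a few controllers
 * that support the RSSI format without advertising it in their feature
 * bits.
 */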
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * also be available. However some controllers list the
                 * max_page as 0 as long as SSP has not been enabled. To
                 * achieve proper debugging output, force the minimum
                 * max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

Johan Hedbergd62e6d62013-09-13 11:40:02 +03001546static void hci_set_event_mask_page_2(struct hci_request *req)
1547{
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1553 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001554 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1559 }
1560
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1563 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001564 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1569 }
1570
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1574
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576}
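/* The event mask pages above are plain 8-byte little-endian bit arrays:
 * absolute event bit N lands in octet N / 8, bit N % 8. A minimal sketch
 * of that mapping with a hypothetical helper (not part of this file):
 */
#if 0	/* illustrative sketch only */
static inline void hci_event_mask_set_bit(u8 events[8], unsigned int bit)
{
	events[bit / 8] |= 1 << (bit % 8);
}

/* e.g. hci_event_mask_set_bit(events, 14) mirrors events[1] |= 0x40 */
#endif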
1577
Johan Hedberg42c6b122013-03-05 20:37:49 +02001578static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001579{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001580 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001581 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001582
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001583	/* Some Broadcom-based Bluetooth controllers do not support the
1584	 * Delete Stored Link Key command. They clearly indicate its
1585	 * absence in the bit mask of supported commands.
1586	 *
1587	 * Check the supported commands and only if the command is marked
1588	 * as supported, send it. If not supported, assume that the controller
1589	 * does not have actual support for stored link keys, which makes this
1590	 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001591	 *
1592	 * Some controllers indicate that they support deleting
1593	 * stored link keys, but they don't. The quirk lets a driver
1594	 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001595 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001598 struct hci_cp_delete_stored_link_key cp;
1599
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1604 }
1605
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001607 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001608
Andre Guedes9193c6e2014-07-01 18:10:09 -03001609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1611
1612 memset(events, 0, sizeof(events));
1613		events[0] = 0x1f; /* enable the five default LE meta events */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001614
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1617 */
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1621 */
1622
Andre Guedes9193c6e2014-07-01 18:10:09 -03001623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1625
Johan Hedberg42c6b122013-03-05 20:37:49 +02001626 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001627 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001628
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1632
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1636 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001637}
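/* The hdev->commands[] tests in hci_init3_req() above follow the
 * "Supported Commands" bit mask layout from the Core specification, e.g.
 * Delete Stored Link Key is octet 6, bit 7, hence commands[6] & 0x80.
 * A hedged sketch of the general pattern, using a hypothetical helper
 * that is not part of this file:
 */
#if 0	/* illustrative sketch only */
static inline bool hci_cmd_supported(struct hci_dev *hdev,
				     unsigned int octet, unsigned int bit)
{
	return hdev->commands[octet] & BIT(bit);
}

/* hci_cmd_supported(hdev, 6, 7) mirrors hdev->commands[6] & 0x80 */
#endif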
1638
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001639static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640{
1641 struct hci_dev *hdev = req->hdev;
1642
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1646
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001647 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001648 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001650
1651 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001652 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1658 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001659}
1660
Johan Hedberg2177bab2013-03-05 20:37:43 +02001661static int __hci_init(struct hci_dev *hdev)
1662{
1663 int err;
1664
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1668
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1671 */
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1675 }
1676
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1679 * first stage init.
1680 */
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1683
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1687
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1691
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1695
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1698 */
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1701
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1716
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001726 }
1727
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001728 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001735 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001736
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1744 }
1745
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001746 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1755
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1759 */
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1764
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001789 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001790
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001791 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001792}
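/* The *_fops referenced by the debugfs entries above are defined earlier
 * in this file. A hedged sketch (names hypothetical) of the common
 * read-only pattern they follow, built on the standard seq_file helpers:
 */
#if 0	/* illustrative sketch only */
static int example_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	seq_printf(f, "%s\n", hdev->name);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, inode->i_private);
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif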
1793
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001794static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795{
1796 struct hci_dev *hdev = req->hdev;
1797
1798 BT_DBG("%s %ld", hdev->name, opt);
1799
1800 /* Reset */
1801 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802 hci_reset_req(req, 0);
1803
1804 /* Read Local Version */
1805 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
1807 /* Read BD Address */
1808 if (hdev->set_bdaddr)
1809 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810}
1811
1812static int __hci_unconf_init(struct hci_dev *hdev)
1813{
1814 int err;
1815
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1817 return 0;
1818
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001819 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1820 if (err < 0)
1821 return err;
1822
1823 return 0;
1824}
1825
Johan Hedberg42c6b122013-03-05 20:37:49 +02001826static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827{
1828 __u8 scan = opt;
1829
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
1832 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001833 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
Johan Hedberg42c6b122013-03-05 20:37:49 +02001836static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837{
1838 __u8 auth = opt;
1839
Johan Hedberg42c6b122013-03-05 20:37:49 +02001840 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
1842 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001843 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844}
1845
Johan Hedberg42c6b122013-03-05 20:37:49 +02001846static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847{
1848 __u8 encrypt = opt;
1849
Johan Hedberg42c6b122013-03-05 20:37:49 +02001850 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001852 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001853 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854}
1855
Johan Hedberg42c6b122013-03-05 20:37:49 +02001856static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001857{
1858 __le16 policy = cpu_to_le16(opt);
1859
Johan Hedberg42c6b122013-03-05 20:37:49 +02001860 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001861
1862 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001863 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001864}
1865
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001866/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 * Device is held on return. */
1868struct hci_dev *hci_dev_get(int index)
1869{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001870 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
1872 BT_DBG("%d", index);
1873
1874 if (index < 0)
1875 return NULL;
1876
1877 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001878 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 if (d->id == index) {
1880 hdev = hci_dev_hold(d);
1881 break;
1882 }
1883 }
1884 read_unlock(&hci_dev_list_lock);
1885 return hdev;
1886}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
1888/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001889
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001890bool hci_discovery_active(struct hci_dev *hdev)
1891{
1892 struct discovery_state *discov = &hdev->discovery;
1893
Andre Guedes6fbe1952012-02-03 17:47:58 -03001894 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001895 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001896 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001897 return true;
1898
Andre Guedes6fbe1952012-02-03 17:47:58 -03001899 default:
1900 return false;
1901 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001902}
1903
Johan Hedbergff9ef572012-01-04 14:23:45 +02001904void hci_discovery_set_state(struct hci_dev *hdev, int state)
1905{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001906 int old_state = hdev->discovery.state;
1907
Johan Hedbergff9ef572012-01-04 14:23:45 +02001908 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1909
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001910 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02001911 return;
1912
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001913 hdev->discovery.state = state;
1914
Johan Hedbergff9ef572012-01-04 14:23:45 +02001915 switch (state) {
1916 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001917 hci_update_background_scan(hdev);
1918
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001919 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03001920 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001921 break;
1922 case DISCOVERY_STARTING:
1923 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001924 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001925 mgmt_discovering(hdev, 1);
1926 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001927 case DISCOVERY_RESOLVING:
1928 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001929 case DISCOVERY_STOPPING:
1930 break;
1931 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02001932}
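/* A compact view of the state machine above, assumed from the switch
 * cases (transitions are driven by mgmt requests and HCI events):
 *
 *   STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING -> STOPPED
 *
 * mgmt_discovering(hdev, 1) fires on entering FINDING; the matching
 * mgmt_discovering(hdev, 0) fires on the transition back to STOPPED,
 * unless discovery never got past STARTING.
 */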
1933
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001934void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935{
Johan Hedberg30883512012-01-04 14:16:21 +02001936 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001937 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
Johan Hedberg561aafb2012-01-04 13:31:59 +02001939 list_for_each_entry_safe(p, n, &cache->all, all) {
1940 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001941 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001943
1944 INIT_LIST_HEAD(&cache->unknown);
1945 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
1947
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001948struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1949 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950{
Johan Hedberg30883512012-01-04 14:16:21 +02001951 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 struct inquiry_entry *e;
1953
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001954 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Johan Hedberg561aafb2012-01-04 13:31:59 +02001956 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001958 return e;
1959 }
1960
1961 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962}
1963
Johan Hedberg561aafb2012-01-04 13:31:59 +02001964struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001965 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001966{
Johan Hedberg30883512012-01-04 14:16:21 +02001967 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001968 struct inquiry_entry *e;
1969
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001970 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001971
1972 list_for_each_entry(e, &cache->unknown, list) {
1973 if (!bacmp(&e->data.bdaddr, bdaddr))
1974 return e;
1975 }
1976
1977 return NULL;
1978}
1979
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001980struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001981 bdaddr_t *bdaddr,
1982 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001983{
1984 struct discovery_state *cache = &hdev->discovery;
1985 struct inquiry_entry *e;
1986
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001987 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001988
1989 list_for_each_entry(e, &cache->resolve, list) {
1990 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1991 return e;
1992 if (!bacmp(&e->data.bdaddr, bdaddr))
1993 return e;
1994 }
1995
1996 return NULL;
1997}
1998
Johan Hedberga3d4e202012-01-09 00:53:02 +02001999void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002000 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002001{
2002 struct discovery_state *cache = &hdev->discovery;
2003 struct list_head *pos = &cache->resolve;
2004 struct inquiry_entry *p;
2005
2006 list_del(&ie->list);
2007
2008 list_for_each_entry(p, &cache->resolve, list) {
2009 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002010 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002011 break;
2012 pos = &p->list;
2013 }
2014
2015 list_add(&ie->list, pos);
2016}
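/* Note: rssi is a signed dBm value, so a smaller abs(rssi) means a
 * stronger signal. The walk above therefore keeps the resolve list
 * ordered strongest-signal-first (NAME_PENDING entries keep their
 * place), so name resolution starts with the likely-closest devices.
 */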
2017
Marcel Holtmannaf589252014-07-01 14:11:20 +02002018u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2019 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020{
Johan Hedberg30883512012-01-04 14:16:21 +02002021 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002022 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002023 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002025 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026
Szymon Janc2b2fec42012-11-20 11:38:54 +01002027 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2028
Marcel Holtmannaf589252014-07-01 14:11:20 +02002029 if (!data->ssp_mode)
2030 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002031
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002032 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002033 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002034 if (!ie->data.ssp_mode)
2035 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002036
Johan Hedberga3d4e202012-01-09 00:53:02 +02002037 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002038 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002039 ie->data.rssi = data->rssi;
2040 hci_inquiry_cache_update_resolve(hdev, ie);
2041 }
2042
Johan Hedberg561aafb2012-01-04 13:31:59 +02002043 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002044 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002045
Johan Hedberg561aafb2012-01-04 13:31:59 +02002046 /* Entry not in the cache. Add new one. */
2047 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002048 if (!ie) {
2049 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2050 goto done;
2051 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002052
2053 list_add(&ie->all, &cache->all);
2054
2055 if (name_known) {
2056 ie->name_state = NAME_KNOWN;
2057 } else {
2058 ie->name_state = NAME_NOT_KNOWN;
2059 list_add(&ie->list, &cache->unknown);
2060 }
2061
2062update:
2063 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002064 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002065 ie->name_state = NAME_KNOWN;
2066 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 }
2068
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002069 memcpy(&ie->data, data, sizeof(*data));
2070 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002072
2073 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002074 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002075
Marcel Holtmannaf589252014-07-01 14:11:20 +02002076done:
2077 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078}
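/* The returned flags are meant for the inquiry result event handlers,
 * which forward them to the management interface. A hedged usage sketch
 * (the surrounding variables are assumed, not taken from this file):
 */
#if 0	/* illustrative sketch only */
	u32 flags = hci_inquiry_cache_update(hdev, &data, name_known);

	if (flags & MGMT_DEV_FOUND_CONFIRM_NAME)
		;	/* ask userspace to confirm the remote name */
	if (flags & MGMT_DEV_FOUND_LEGACY_PAIRING)
		;	/* remote device does not support SSP */
#endif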
2079
2080static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2081{
Johan Hedberg30883512012-01-04 14:16:21 +02002082 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 struct inquiry_info *info = (struct inquiry_info *) buf;
2084 struct inquiry_entry *e;
2085 int copied = 0;
2086
Johan Hedberg561aafb2012-01-04 13:31:59 +02002087 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002088 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002089
2090 if (copied >= num)
2091 break;
2092
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 bacpy(&info->bdaddr, &data->bdaddr);
2094 info->pscan_rep_mode = data->pscan_rep_mode;
2095 info->pscan_period_mode = data->pscan_period_mode;
2096 info->pscan_mode = data->pscan_mode;
2097 memcpy(info->dev_class, data->dev_class, 3);
2098 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002099
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002101 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 }
2103
2104 BT_DBG("cache %p, copied %d", cache, copied);
2105 return copied;
2106}
2107
Johan Hedberg42c6b122013-03-05 20:37:49 +02002108static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
2110 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002111 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 struct hci_cp_inquiry cp;
2113
2114 BT_DBG("%s", hdev->name);
2115
2116 if (test_bit(HCI_INQUIRY, &hdev->flags))
2117 return;
2118
2119 /* Start Inquiry */
2120 memcpy(&cp.lap, &ir->lap, 3);
2121 cp.length = ir->length;
2122 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002123 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124}
2125
Andre Guedes3e13fa12013-03-27 20:04:56 -03002126static int wait_inquiry(void *word)
2127{
2128 schedule();
2129 return signal_pending(current);
2130}
2131
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132int hci_inquiry(void __user *arg)
2133{
2134 __u8 __user *ptr = arg;
2135 struct hci_inquiry_req ir;
2136 struct hci_dev *hdev;
2137 int err = 0, do_inquiry = 0, max_rsp;
2138 long timeo;
2139 __u8 *buf;
2140
2141 if (copy_from_user(&ir, ptr, sizeof(ir)))
2142 return -EFAULT;
2143
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002144 hdev = hci_dev_get(ir.dev_id);
2145 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 return -ENODEV;
2147
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002148 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2149 err = -EBUSY;
2150 goto done;
2151 }
2152
Marcel Holtmann4a964402014-07-02 19:10:33 +02002153 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002154 err = -EOPNOTSUPP;
2155 goto done;
2156 }
2157
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002158 if (hdev->dev_type != HCI_BREDR) {
2159 err = -EOPNOTSUPP;
2160 goto done;
2161 }
2162
Johan Hedberg56f87902013-10-02 13:43:13 +03002163 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2164 err = -EOPNOTSUPP;
2165 goto done;
2166 }
2167
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002168 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002169 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002170 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002171 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 do_inquiry = 1;
2173 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002174 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175
Marcel Holtmann04837f62006-07-03 10:02:33 +02002176 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002177
2178 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002179 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2180 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002181 if (err < 0)
2182 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002183
2184 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2185 * cleared). If it is interrupted by a signal, return -EINTR.
2186 */
2187 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2188 TASK_INTERRUPTIBLE))
2189 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002190 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002192	/* For an unlimited number of responses we will use a buffer with
2193 * 255 entries
2194 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2196
2197	/* cache_dump can't sleep. Therefore we allocate a temp buffer and then
2198	 * copy it to user space.
2199 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002200 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002201 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 err = -ENOMEM;
2203 goto done;
2204 }
2205
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002206 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002208 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210 BT_DBG("num_rsp %d", ir.num_rsp);
2211
2212 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2213 ptr += sizeof(ir);
2214 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002215 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002217 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218 err = -EFAULT;
2219
2220 kfree(buf);
2221
2222done:
2223 hci_dev_put(hdev);
2224 return err;
2225}
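/* From userspace this handler is reached via the HCIINQUIRY ioctl on a
 * raw HCI socket. A hedged sketch of a caller, with error handling
 * omitted; the layout of inquiry_info entries following the request
 * structure is assumed from the copy_to_user() logic above:
 */
#if 0	/* illustrative sketch only, userspace */
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} buf = { { 0 } };

	buf.ir.dev_id  = 0;			/* hci0 */
	buf.ir.flags   = IREQ_CACHE_FLUSH;
	buf.ir.lap[0]  = 0x33;			/* GIAC 0x9e8b33, */
	buf.ir.lap[1]  = 0x8b;			/* stored little endian */
	buf.ir.lap[2]  = 0x9e;
	buf.ir.length  = 8;			/* 8 * 1.28 seconds */
	buf.ir.num_rsp = 8;

	ioctl(dd, HCIINQUIRY, &buf);
#endif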
2226
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002227static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002228{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 int ret = 0;
2230
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 BT_DBG("%s %p", hdev->name, hdev);
2232
2233 hci_req_lock(hdev);
2234
Johan Hovold94324962012-03-15 14:48:41 +01002235 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2236 ret = -ENODEV;
2237 goto done;
2238 }
2239
Marcel Holtmannd603b762014-07-06 12:11:14 +02002240 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2241 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002242 /* Check for rfkill but allow the HCI setup stage to
2243 * proceed (which in itself doesn't cause any RF activity).
2244 */
2245 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2246 ret = -ERFKILL;
2247 goto done;
2248 }
2249
2250 /* Check for valid public address or a configured static
2251	 * random address, but let the HCI setup proceed to
2252 * be able to determine if there is a public address
2253 * or not.
2254 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002255 * In case of user channel usage, it is not important
2256 * if a public address or static random address is
2257 * available.
2258 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002259 * This check is only valid for BR/EDR controllers
2260 * since AMP controllers do not have an address.
2261 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002262 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2263 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002264 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2265 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2266 ret = -EADDRNOTAVAIL;
2267 goto done;
2268 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002269 }
2270
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 if (test_bit(HCI_UP, &hdev->flags)) {
2272 ret = -EALREADY;
2273 goto done;
2274 }
2275
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 if (hdev->open(hdev)) {
2277 ret = -EIO;
2278 goto done;
2279 }
2280
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002281 atomic_set(&hdev->cmd_cnt, 1);
2282 set_bit(HCI_INIT, &hdev->flags);
2283
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002284 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2285 if (hdev->setup)
2286 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002287
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002288 /* The transport driver can set these quirks before
2289 * creating the HCI device or in its setup callback.
2290 *
2291 * In case any of them is set, the controller has to
2292 * start up as unconfigured.
2293 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002294 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2295 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002296 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002297
2298 /* For an unconfigured controller it is required to
2299 * read at least the version information provided by
2300 * the Read Local Version Information command.
2301 *
2302 * If the set_bdaddr driver callback is provided, then
2303 * also the original Bluetooth public device address
2304 * will be read using the Read BD Address command.
2305 */
2306 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2307 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002308 }
2309
Marcel Holtmann9713c172014-07-06 12:11:15 +02002310 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2311 /* If public address change is configured, ensure that
2312 * the address gets programmed. If the driver does not
2313 * support changing the public address, fail the power
2314 * on procedure.
2315 */
2316 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2317 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002318 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2319 else
2320 ret = -EADDRNOTAVAIL;
2321 }
2322
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002323 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002324 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002325 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002326 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 }
2328
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002329 clear_bit(HCI_INIT, &hdev->flags);
2330
Linus Torvalds1da177e2005-04-16 15:20:36 -07002331 if (!ret) {
2332 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002333 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002334 set_bit(HCI_UP, &hdev->flags);
2335 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002336 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002337 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002338 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002339 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002340 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002341 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002342 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002343 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002344 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002345 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002346 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002347 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002348 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002349 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
2351 skb_queue_purge(&hdev->cmd_q);
2352 skb_queue_purge(&hdev->rx_q);
2353
2354 if (hdev->flush)
2355 hdev->flush(hdev);
2356
2357 if (hdev->sent_cmd) {
2358 kfree_skb(hdev->sent_cmd);
2359 hdev->sent_cmd = NULL;
2360 }
2361
2362 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002363 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 }
2365
2366done:
2367 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368 return ret;
2369}
2370
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002371/* ---- HCI ioctl helpers ---- */
2372
2373int hci_dev_open(__u16 dev)
2374{
2375 struct hci_dev *hdev;
2376 int err;
2377
2378 hdev = hci_dev_get(dev);
2379 if (!hdev)
2380 return -ENODEV;
2381
Marcel Holtmann4a964402014-07-02 19:10:33 +02002382 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002383 * up as user channel. Trying to bring them up as normal devices
2384 * will result into a failure. Only user channel operation is
2385 * possible.
2386 *
2387 * When this function is called for a user channel, the flag
2388 * HCI_USER_CHANNEL will be set first before attempting to
2389 * open the device.
2390 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002392 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2393 err = -EOPNOTSUPP;
2394 goto done;
2395 }
2396
Johan Hedberge1d08f42013-10-01 22:44:50 +03002397 /* We need to ensure that no other power on/off work is pending
2398 * before proceeding to call hci_dev_do_open. This is
2399 * particularly important if the setup procedure has not yet
2400 * completed.
2401 */
2402 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2403 cancel_delayed_work(&hdev->power_off);
2404
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002405 /* After this call it is guaranteed that the setup procedure
2406 * has finished. This means that error conditions like RFKILL
2407 * or no valid public or static random address apply.
2408 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002409 flush_workqueue(hdev->req_workqueue);
2410
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002411 err = hci_dev_do_open(hdev);
2412
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002413done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002414 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002415 return err;
2416}
2417
Johan Hedbergd7347f32014-07-04 12:37:23 +03002418/* This function requires the caller holds hdev->lock */
2419static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2420{
2421 struct hci_conn_params *p;
2422
2423 list_for_each_entry(p, &hdev->le_conn_params, list)
2424 list_del_init(&p->action);
2425
2426 BT_DBG("All LE pending actions cleared");
2427}
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429static int hci_dev_do_close(struct hci_dev *hdev)
2430{
2431 BT_DBG("%s %p", hdev->name, hdev);
2432
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002433 cancel_delayed_work(&hdev->power_off);
2434
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435 hci_req_cancel(hdev, ENODEV);
2436 hci_req_lock(hdev);
2437
2438 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002439 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 hci_req_unlock(hdev);
2441 return 0;
2442 }
2443
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002444 /* Flush RX and TX works */
2445 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002446 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002448 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002449 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002450 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002451 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002452 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002453 }
2454
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002455 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002456 cancel_delayed_work(&hdev->service_cache);
2457
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002458 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002459
2460 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2461 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002462
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002463 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002464 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 hci_conn_hash_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002466 hci_pend_le_actions_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002467 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
2469 hci_notify(hdev, HCI_DEV_DOWN);
2470
2471 if (hdev->flush)
2472 hdev->flush(hdev);
2473
2474 /* Reset device */
2475 skb_queue_purge(&hdev->cmd_q);
2476 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002477 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2478 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002479 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002480 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002481 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 clear_bit(HCI_INIT, &hdev->flags);
2483 }
2484
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002485 /* flush cmd work */
2486 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002487
2488 /* Drop queues */
2489 skb_queue_purge(&hdev->rx_q);
2490 skb_queue_purge(&hdev->cmd_q);
2491 skb_queue_purge(&hdev->raw_q);
2492
2493 /* Drop last sent command */
2494 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002495 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002496 kfree_skb(hdev->sent_cmd);
2497 hdev->sent_cmd = NULL;
2498 }
2499
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002500 kfree_skb(hdev->recv_evt);
2501 hdev->recv_evt = NULL;
2502
Linus Torvalds1da177e2005-04-16 15:20:36 -07002503 /* After this point our queues are empty
2504 * and no tasks are scheduled. */
2505 hdev->close(hdev);
2506
Johan Hedberg35b973c2013-03-15 17:06:59 -05002507 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002508 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002509 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2510
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002511 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2512 if (hdev->dev_type == HCI_BREDR) {
2513 hci_dev_lock(hdev);
2514 mgmt_powered(hdev, 0);
2515 hci_dev_unlock(hdev);
2516 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002517 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002518
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002519 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002520 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002521
Johan Hedberge59fda82012-02-22 18:11:53 +02002522 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002523 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002524 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002525
Linus Torvalds1da177e2005-04-16 15:20:36 -07002526 hci_req_unlock(hdev);
2527
2528 hci_dev_put(hdev);
2529 return 0;
2530}
2531
2532int hci_dev_close(__u16 dev)
2533{
2534 struct hci_dev *hdev;
2535 int err;
2536
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002537 hdev = hci_dev_get(dev);
2538 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002540
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002541 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2542 err = -EBUSY;
2543 goto done;
2544 }
2545
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002546 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2547 cancel_delayed_work(&hdev->power_off);
2548
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002550
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002551done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002552 hci_dev_put(hdev);
2553 return err;
2554}
2555
2556int hci_dev_reset(__u16 dev)
2557{
2558 struct hci_dev *hdev;
2559 int ret = 0;
2560
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002561 hdev = hci_dev_get(dev);
2562 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 return -ENODEV;
2564
2565 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
Marcel Holtmann808a0492013-08-26 20:57:58 -07002567 if (!test_bit(HCI_UP, &hdev->flags)) {
2568 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002570 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002572 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573 ret = -EBUSY;
2574 goto done;
2575 }
2576
Marcel Holtmann4a964402014-07-02 19:10:33 +02002577 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002578 ret = -EOPNOTSUPP;
2579 goto done;
2580 }
2581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 /* Drop queues */
2583 skb_queue_purge(&hdev->rx_q);
2584 skb_queue_purge(&hdev->cmd_q);
2585
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002586 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002587 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002589 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590
2591 if (hdev->flush)
2592 hdev->flush(hdev);
2593
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002594 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002595 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002596
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002597 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
2599done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 hci_req_unlock(hdev);
2601 hci_dev_put(hdev);
2602 return ret;
2603}
2604
2605int hci_dev_reset_stat(__u16 dev)
2606{
2607 struct hci_dev *hdev;
2608 int ret = 0;
2609
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002610 hdev = hci_dev_get(dev);
2611 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 return -ENODEV;
2613
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002614 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2615 ret = -EBUSY;
2616 goto done;
2617 }
2618
Marcel Holtmann4a964402014-07-02 19:10:33 +02002619 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002620 ret = -EOPNOTSUPP;
2621 goto done;
2622 }
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2625
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002626done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 return ret;
2629}
2630
2631int hci_dev_cmd(unsigned int cmd, void __user *arg)
2632{
2633 struct hci_dev *hdev;
2634 struct hci_dev_req dr;
2635 int err = 0;
2636
2637 if (copy_from_user(&dr, arg, sizeof(dr)))
2638 return -EFAULT;
2639
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002640 hdev = hci_dev_get(dr.dev_id);
2641 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 return -ENODEV;
2643
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002644 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2645 err = -EBUSY;
2646 goto done;
2647 }
2648
Marcel Holtmann4a964402014-07-02 19:10:33 +02002649 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002650 err = -EOPNOTSUPP;
2651 goto done;
2652 }
2653
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002654 if (hdev->dev_type != HCI_BREDR) {
2655 err = -EOPNOTSUPP;
2656 goto done;
2657 }
2658
Johan Hedberg56f87902013-10-02 13:43:13 +03002659 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2660 err = -EOPNOTSUPP;
2661 goto done;
2662 }
2663
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 switch (cmd) {
2665 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002666 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2667 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668 break;
2669
2670 case HCISETENCRYPT:
2671 if (!lmp_encrypt_capable(hdev)) {
2672 err = -EOPNOTSUPP;
2673 break;
2674 }
2675
2676 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2677 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002678 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2679 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680 if (err)
2681 break;
2682 }
2683
Johan Hedberg01178cd2013-03-05 20:37:41 +02002684 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2685 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 break;
2687
2688 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002689 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2690 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 break;
2692
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002693 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002694 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2695 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002696 break;
2697
2698 case HCISETLINKMODE:
2699 hdev->link_mode = ((__u16) dr.dev_opt) &
2700 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2701 break;
2702
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 case HCISETPTYPE:
2704 hdev->pkt_type = (__u16) dr.dev_opt;
2705 break;
2706
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002708 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2709 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 break;
2711
2712 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002713 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2714 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 break;
2716
2717 default:
2718 err = -EINVAL;
2719 break;
2720 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002721
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002722done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 hci_dev_put(hdev);
2724 return err;
2725}
2726
2727int hci_get_dev_list(void __user *arg)
2728{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002729 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 struct hci_dev_list_req *dl;
2731 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 int n = 0, size, err;
2733 __u16 dev_num;
2734
2735 if (get_user(dev_num, (__u16 __user *) arg))
2736 return -EFAULT;
2737
2738 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2739 return -EINVAL;
2740
2741 size = sizeof(*dl) + dev_num * sizeof(*dr);
2742
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002743 dl = kzalloc(size, GFP_KERNEL);
2744 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 return -ENOMEM;
2746
2747 dr = dl->dev_req;
2748
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002749 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002750 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002751 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002752 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002753
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002754 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 (dr + n)->dev_id = hdev->id;
2758 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002759
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 if (++n >= dev_num)
2761 break;
2762 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002763 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765 dl->dev_num = n;
2766 size = sizeof(*dl) + n * sizeof(*dr);
2767
2768 err = copy_to_user(arg, dl, size);
2769 kfree(dl);
2770
2771 return err ? -EFAULT : 0;
2772}
2773
2774int hci_get_dev_info(void __user *arg)
2775{
2776 struct hci_dev *hdev;
2777 struct hci_dev_info di;
2778 int err = 0;
2779
2780 if (copy_from_user(&di, arg, sizeof(di)))
2781 return -EFAULT;
2782
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002783 hdev = hci_dev_get(di.dev_id);
2784 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 return -ENODEV;
2786
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002787 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002788 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002789
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002790 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2791 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002792
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 strcpy(di.name, hdev->name);
2794 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002795 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 di.flags = hdev->flags;
2797 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002798 if (lmp_bredr_capable(hdev)) {
2799 di.acl_mtu = hdev->acl_mtu;
2800 di.acl_pkts = hdev->acl_pkts;
2801 di.sco_mtu = hdev->sco_mtu;
2802 di.sco_pkts = hdev->sco_pkts;
2803 } else {
2804 di.acl_mtu = hdev->le_mtu;
2805 di.acl_pkts = hdev->le_pkts;
2806 di.sco_mtu = 0;
2807 di.sco_pkts = 0;
2808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 di.link_policy = hdev->link_policy;
2810 di.link_mode = hdev->link_mode;
2811
2812 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2813 memcpy(&di.features, &hdev->features, sizeof(di.features));
2814
2815 if (copy_to_user(arg, &di, sizeof(di)))
2816 err = -EFAULT;
2817
2818 hci_dev_put(hdev);
2819
2820 return err;
2821}
2822
2823/* ---- Interface to HCI drivers ---- */
2824
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002825static int hci_rfkill_set_block(void *data, bool blocked)
2826{
2827 struct hci_dev *hdev = data;
2828
2829 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2830
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002831 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2832 return -EBUSY;
2833
Johan Hedberg5e130362013-09-13 08:58:17 +03002834 if (blocked) {
2835 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002836 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2837 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002838 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002839 } else {
2840 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002841 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002842
2843 return 0;
2844}
2845
2846static const struct rfkill_ops hci_rfkill_ops = {
2847 .set_block = hci_rfkill_set_block,
2848};
2849
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002850static void hci_power_on(struct work_struct *work)
2851{
2852 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002853 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002854
2855 BT_DBG("%s", hdev->name);
2856
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002857 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002858 if (err < 0) {
2859 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002860 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002861 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002862
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002863 /* During the HCI setup phase, a few error conditions are
2864 * ignored and they need to be checked now. If they are still
2865 * valid, it is important to turn the device back off.
2866 */
2867 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002868 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002869 (hdev->dev_type == HCI_BREDR &&
2870 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2871 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002872 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2873 hci_dev_do_close(hdev);
2874 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002875 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2876 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002877 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002878
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002879 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002880 /* For unconfigured devices, set the HCI_RAW flag
2881 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002882 */
2883 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2884 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002885
2886 /* For fully configured devices, this will send
2887 * the Index Added event. For unconfigured devices,
2888 * it will send the Unconfigured Index Added event.
2889 *
2890 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2891 * and no event will be sent.
2892 */
2893 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002894 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002895 /* Now that the controller is configured, it is
2896 * important to clear the HCI_RAW flag.
2897 */
2898 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2899 clear_bit(HCI_RAW, &hdev->flags);
2900
Marcel Holtmannd603b762014-07-06 12:11:14 +02002901 /* Powering on the controller with HCI_CONFIG set only
2902 * happens with the transition from unconfigured to
2903 * configured. This will send the Index Added event.
2904 */
2905 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002906 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002907}
2908
2909static void hci_power_off(struct work_struct *work)
2910{
Johan Hedberg32435532011-11-07 22:16:04 +02002911 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002912 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002913
2914 BT_DBG("%s", hdev->name);
2915
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002916 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002917}
2918
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002919static void hci_discov_off(struct work_struct *work)
2920{
2921 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002922
2923 hdev = container_of(work, struct hci_dev, discov_off.work);
2924
2925 BT_DBG("%s", hdev->name);
2926
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002927 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002928}
2929
Johan Hedberg35f74982014-02-18 17:14:32 +02002930void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002931{
Johan Hedberg48210022013-01-27 00:31:28 +02002932 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002933
Johan Hedberg48210022013-01-27 00:31:28 +02002934 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2935 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002936 kfree(uuid);
2937 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002938}
2939
Johan Hedberg35f74982014-02-18 17:14:32 +02002940void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002941{
2942 struct list_head *p, *n;
2943
2944 list_for_each_safe(p, n, &hdev->link_keys) {
2945 struct link_key *key;
2946
2947 key = list_entry(p, struct link_key, list);
2948
2949 list_del(p);
2950 kfree(key);
2951 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002952}
2953
Johan Hedberg35f74982014-02-18 17:14:32 +02002954void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002955{
2956 struct smp_ltk *k, *tmp;
2957
2958 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2959 list_del(&k->list);
2960 kfree(k);
2961 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002962}
2963
Johan Hedberg970c4e42014-02-18 10:19:33 +02002964void hci_smp_irks_clear(struct hci_dev *hdev)
2965{
2966 struct smp_irk *k, *tmp;
2967
2968 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2969 list_del(&k->list);
2970 kfree(k);
2971 }
2972}
2973
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002974struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2975{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002976 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002977
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002978 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002979 if (bacmp(bdaddr, &k->bdaddr) == 0)
2980 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002981
2982 return NULL;
2983}
2984
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302985static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002986 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002987{
2988 /* Legacy key */
2989 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302990 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002991
2992 /* Debug keys are insecure so don't store them persistently */
2993 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302994 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002995
2996 /* Changed combination key and there's no previous one */
2997 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302998 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002999
3000 /* Security mode 3 case */
3001 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303002 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003003
3004 /* Neither the local nor the remote side had no-bonding as its requirement */
3005 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303006 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003007
3008 /* Local side had dedicated bonding as requirement */
3009 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303010 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003011
3012 /* Remote side had dedicated bonding as requirement */
3013 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303014 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003015
3016 /* If none of the above criteria match, then don't store the key
3017 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303018 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003019}
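
/* Illustrative sketch (hypothetical caller; example_* is not part of
 * the original file): without a connection object the "Security mode 3"
 * branch above applies. 0x04 is an Unauthenticated Combination Key, so
 * it falls through the legacy, debug and changed-combination checks and
 * the key is treated as persistent.
 */
static bool __maybe_unused example_persistent_check(struct hci_dev *hdev)
{
	return hci_persistent_key(hdev, NULL, 0x04, 0xff);
}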
3020
Johan Hedberg98a0b842014-01-30 19:40:00 -08003021static bool ltk_type_master(u8 type)
3022{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003023 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003024}
3025
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003026struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003027 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003028{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003029 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003030
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003031 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003032 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003033 continue;
3034
Johan Hedberg98a0b842014-01-30 19:40:00 -08003035 if (ltk_type_master(k->type) != master)
3036 continue;
3037
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003038 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003039 }
3040
3041 return NULL;
3042}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003043
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003044struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003045 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003046{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003047 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003048
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003049 list_for_each_entry(k, &hdev->long_term_keys, list)
3050 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003051 bacmp(bdaddr, &k->bdaddr) == 0 &&
3052 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003053 return k;
3054
3055 return NULL;
3056}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003057
Johan Hedberg970c4e42014-02-18 10:19:33 +02003058struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3059{
3060 struct smp_irk *irk;
3061
3062 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3063 if (!bacmp(&irk->rpa, rpa))
3064 return irk;
3065 }
3066
3067 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3068 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3069 bacpy(&irk->rpa, rpa);
3070 return irk;
3071 }
3072 }
3073
3074 return NULL;
3075}
3076
3077struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3078 u8 addr_type)
3079{
3080 struct smp_irk *irk;
3081
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003082 /* Identity Address must be public or static random */
3083 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3084 return NULL;
3085
Johan Hedberg970c4e42014-02-18 10:19:33 +02003086 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3087 if (addr_type == irk->addr_type &&
3088 bacmp(bdaddr, &irk->bdaddr) == 0)
3089 return irk;
3090 }
3091
3092 return NULL;
3093}
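
/* Illustrative helper (not part of the original file; example_* is
 * hypothetical): the address test used above, spelled out. bdaddr_t is
 * stored little endian, so b[5] holds the most significant byte, and a
 * static random address must have its two top bits set (11xxxxxx).
 */
static bool __maybe_unused example_is_static_random(const bdaddr_t *bdaddr)
{
	return (bdaddr->b[5] & 0xc0) == 0xc0;
}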
3094
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003095struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003096 bdaddr_t *bdaddr, u8 *val, u8 type,
3097 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003098{
3099 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303100 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003101
3102 old_key = hci_find_link_key(hdev, bdaddr);
3103 if (old_key) {
3104 old_key_type = old_key->type;
3105 key = old_key;
3106 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003107 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003108 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003109 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003110 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003111 list_add(&key->list, &hdev->link_keys);
3112 }
3113
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003114 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003115
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003116 /* Some buggy controller combinations generate a changed
3117 * combination key for legacy pairing even when there's no
3118 * previous key */
3119 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003120 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003121 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003122 if (conn)
3123 conn->key_type = type;
3124 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003125
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003126 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003127 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003128 key->pin_len = pin_len;
3129
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003130 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003131 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003132 else
3133 key->type = type;
3134
Johan Hedberg7652ff62014-06-24 13:15:49 +03003135 if (persistent)
3136 *persistent = hci_persistent_key(hdev, conn, type,
3137 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003138
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003139 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003140}
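
/* Usage sketch (hypothetical caller, for illustration): storing a
 * combination key under hdev->lock and reacting to the persistence
 * decision that hci_persistent_key() reports via the out parameter.
 */
static void __maybe_unused example_store_link_key(struct hci_dev *hdev,
						  struct hci_conn *conn,
						  bdaddr_t *bdaddr, u8 *val)
{
	bool persistent;

	hci_dev_lock(hdev);

	if (hci_add_link_key(hdev, conn, bdaddr, val, HCI_LK_COMBINATION,
			     0, &persistent) && !persistent)
		BT_DBG("key for %pMR will not survive a power cycle", bdaddr);

	hci_dev_unlock(hdev);
}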
3141
Johan Hedbergca9142b2014-02-19 14:57:44 +02003142struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003143 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003144 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003145{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003146 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003147 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003148
Johan Hedberg98a0b842014-01-30 19:40:00 -08003149 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003150 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003151 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003152 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003153 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003154 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003155 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003156 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003157 }
3158
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003159 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003160 key->bdaddr_type = addr_type;
3161 memcpy(key->val, tk, sizeof(key->val));
3162 key->authenticated = authenticated;
3163 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003164 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003165 key->enc_size = enc_size;
3166 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003167
Johan Hedbergca9142b2014-02-19 14:57:44 +02003168 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003169}
3170
Johan Hedbergca9142b2014-02-19 14:57:44 +02003171struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003173{
3174 struct smp_irk *irk;
3175
3176 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3177 if (!irk) {
3178 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3179 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003180 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003181
3182 bacpy(&irk->bdaddr, bdaddr);
3183 irk->addr_type = addr_type;
3184
3185 list_add(&irk->list, &hdev->identity_resolving_keys);
3186 }
3187
3188 memcpy(irk->val, val, 16);
3189 bacpy(&irk->rpa, rpa);
3190
Johan Hedbergca9142b2014-02-19 14:57:44 +02003191 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003192}
3193
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003194int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3195{
3196 struct link_key *key;
3197
3198 key = hci_find_link_key(hdev, bdaddr);
3199 if (!key)
3200 return -ENOENT;
3201
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003202 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003203
3204 list_del(&key->list);
3205 kfree(key);
3206
3207 return 0;
3208}
3209
Johan Hedberge0b2b272014-02-18 17:14:31 +02003210int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003211{
3212 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003213 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003214
3215 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003216 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003217 continue;
3218
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003219 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003220
3221 list_del(&k->list);
3222 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003223 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003224 }
3225
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003226 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003227}
3228
Johan Hedberga7ec7332014-02-18 17:14:35 +02003229void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3230{
3231 struct smp_irk *k, *tmp;
3232
Johan Hedberg668b7b12014-02-21 16:03:31 +02003233 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003234 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3235 continue;
3236
3237 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3238
3239 list_del(&k->list);
3240 kfree(k);
3241 }
3242}
3243
Ville Tervo6bd32322011-02-16 16:32:41 +02003244/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003245static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003246{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003247 struct hci_dev *hdev = container_of(work, struct hci_dev,
3248 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003249
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003250 if (hdev->sent_cmd) {
3251 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3252 u16 opcode = __le16_to_cpu(sent->opcode);
3253
3254 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3255 } else {
3256 BT_ERR("%s command tx timeout", hdev->name);
3257 }
3258
Ville Tervo6bd32322011-02-16 16:32:41 +02003259 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003260 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003261}
3262
Szymon Janc2763eda2011-03-22 13:12:22 +01003263struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003264 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003265{
3266 struct oob_data *data;
3267
3268 list_for_each_entry(data, &hdev->remote_oob_data, list)
3269 if (bacmp(bdaddr, &data->bdaddr) == 0)
3270 return data;
3271
3272 return NULL;
3273}
3274
3275int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3276{
3277 struct oob_data *data;
3278
3279 data = hci_find_remote_oob_data(hdev, bdaddr);
3280 if (!data)
3281 return -ENOENT;
3282
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003283 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003284
3285 list_del(&data->list);
3286 kfree(data);
3287
3288 return 0;
3289}
3290
Johan Hedberg35f74982014-02-18 17:14:32 +02003291void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003292{
3293 struct oob_data *data, *n;
3294
3295 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3296 list_del(&data->list);
3297 kfree(data);
3298 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003299}
3300
Marcel Holtmann07988722014-01-10 02:07:29 -08003301int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3302 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003303{
3304 struct oob_data *data;
3305
3306 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003307 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003308 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003309 if (!data)
3310 return -ENOMEM;
3311
3312 bacpy(&data->bdaddr, bdaddr);
3313 list_add(&data->list, &hdev->remote_oob_data);
3314 }
3315
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003316 memcpy(data->hash192, hash, sizeof(data->hash192));
3317 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003318
Marcel Holtmann07988722014-01-10 02:07:29 -08003319 memset(data->hash256, 0, sizeof(data->hash256));
3320 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3321
3322 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3323
3324 return 0;
3325}
3326
3327int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3328 u8 *hash192, u8 *randomizer192,
3329 u8 *hash256, u8 *randomizer256)
3330{
3331 struct oob_data *data;
3332
3333 data = hci_find_remote_oob_data(hdev, bdaddr);
3334 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003335 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003336 if (!data)
3337 return -ENOMEM;
3338
3339 bacpy(&data->bdaddr, bdaddr);
3340 list_add(&data->list, &hdev->remote_oob_data);
3341 }
3342
3343 memcpy(data->hash192, hash192, sizeof(data->hash192));
3344 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3345
3346 memcpy(data->hash256, hash256, sizeof(data->hash256));
3347 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3348
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003349 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003350
3351 return 0;
3352}
3353
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003354struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3355 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003356{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003357 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003358
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003359 list_for_each_entry(b, &hdev->blacklist, list) {
3360 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003361 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003362 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003363
3364 return NULL;
3365}
3366
Marcel Holtmannc9507492014-02-27 19:35:54 -08003367static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003368{
3369 struct list_head *p, *n;
3370
3371 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003372 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003373
3374 list_del(p);
3375 kfree(b);
3376 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003377}
3378
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003379int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003380{
3381 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003382
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003383 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003384 return -EBADF;
3385
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003386 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003387 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003388
3389 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003390 if (!entry)
3391 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003392
3393 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003394 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003395
3396 list_add(&entry->list, &hdev->blacklist);
3397
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003398 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003399}
3400
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003401int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003402{
3403 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003404
Johan Hedberg35f74982014-02-18 17:14:32 +02003405 if (!bacmp(bdaddr, BDADDR_ANY)) {
3406 hci_blacklist_clear(hdev);
3407 return 0;
3408 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003409
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003410 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003411 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003412 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003413
3414 list_del(&entry->list);
3415 kfree(entry);
3416
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003417 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003418}
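
/* Usage sketch (illustrative; example_* is hypothetical): blocking a
 * BR/EDR remote. BDADDR_BREDR is the type value used for non-LE
 * blacklist entries, and hdev->lock is held around list manipulation.
 */
static int __maybe_unused example_block_device(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, BDADDR_BREDR);
	hci_dev_unlock(hdev);

	return err;
}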
3419
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003420struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3421 bdaddr_t *bdaddr, u8 type)
3422{
3423 struct bdaddr_list *b;
3424
3425 list_for_each_entry(b, &hdev->le_white_list, list) {
3426 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3427 return b;
3428 }
3429
3430 return NULL;
3431}
3432
3433void hci_white_list_clear(struct hci_dev *hdev)
3434{
3435 struct list_head *p, *n;
3436
3437 list_for_each_safe(p, n, &hdev->le_white_list) {
3438 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3439
3440 list_del(p);
3441 kfree(b);
3442 }
3443}
3444
3445int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3446{
3447 struct bdaddr_list *entry;
3448
3449 if (!bacmp(bdaddr, BDADDR_ANY))
3450 return -EBADF;
3451
3452 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3453 if (!entry)
3454 return -ENOMEM;
3455
3456 bacpy(&entry->bdaddr, bdaddr);
3457 entry->bdaddr_type = type;
3458
3459 list_add(&entry->list, &hdev->le_white_list);
3460
3461 return 0;
3462}
3463
3464int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3465{
3466 struct bdaddr_list *entry;
3467
3468 if (!bacmp(bdaddr, BDADDR_ANY))
3469 return -EBADF;
3470
3471 entry = hci_white_list_lookup(hdev, bdaddr, type);
3472 if (!entry)
3473 return -ENOENT;
3474
3475 list_del(&entry->list);
3476 kfree(entry);
3477
3478 return 0;
3479}
3480
Andre Guedes15819a72014-02-03 13:56:18 -03003481/* This function requires the caller holds hdev->lock */
3482struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3483 bdaddr_t *addr, u8 addr_type)
3484{
3485 struct hci_conn_params *params;
3486
Johan Hedberg738f6182014-07-03 19:33:51 +03003487 /* The conn params list only contains identity addresses */
3488 if (!hci_is_identity_address(addr, addr_type))
3489 return NULL;
3490
Andre Guedes15819a72014-02-03 13:56:18 -03003491 list_for_each_entry(params, &hdev->le_conn_params, list) {
3492 if (bacmp(&params->addr, addr) == 0 &&
3493 params->addr_type == addr_type) {
3494 return params;
3495 }
3496 }
3497
3498 return NULL;
3499}
3500
Andre Guedescef952c2014-02-26 20:21:49 -03003501static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3502{
3503 struct hci_conn *conn;
3504
3505 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3506 if (!conn)
3507 return false;
3508
3509 if (conn->dst_type != type)
3510 return false;
3511
3512 if (conn->state != BT_CONNECTED)
3513 return false;
3514
3515 return true;
3516}
3517
Andre Guedes15819a72014-02-03 13:56:18 -03003518/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003519struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3520 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003521{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003522 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003523
Johan Hedberg738f6182014-07-03 19:33:51 +03003524 /* The list only contains identity addresses */
3525 if (!hci_is_identity_address(addr, addr_type))
3526 return NULL;
3527
Johan Hedberg501f8822014-07-04 12:37:26 +03003528 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003529 if (bacmp(&param->addr, addr) == 0 &&
3530 param->addr_type == addr_type)
3531 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003532 }
3533
3534 return NULL;
3535}
3536
3537/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003538struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3539 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003540{
3541 struct hci_conn_params *params;
3542
Johan Hedbergc46245b2014-07-02 17:37:33 +03003543 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003544 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003545
3546 params = hci_conn_params_lookup(hdev, addr, addr_type);
3547 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003548 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003549
3550 params = kzalloc(sizeof(*params), GFP_KERNEL);
3551 if (!params) {
3552 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003553 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003554 }
3555
3556 bacpy(&params->addr, addr);
3557 params->addr_type = addr_type;
3558
3559 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003560 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003561
3562 params->conn_min_interval = hdev->le_conn_min_interval;
3563 params->conn_max_interval = hdev->le_conn_max_interval;
3564 params->conn_latency = hdev->le_conn_latency;
3565 params->supervision_timeout = hdev->le_supv_timeout;
3566 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3567
3568 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3569
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003570 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003571}
3572
3573/* This function requires the caller holds hdev->lock */
3574int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003575 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003576{
3577 struct hci_conn_params *params;
3578
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003579 params = hci_conn_params_add(hdev, addr, addr_type);
3580 if (!params)
3581 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003582
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003583 if (params->auto_connect == auto_connect)
3584 return 0;
3585
Johan Hedberg95305ba2014-07-04 12:37:21 +03003586 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003587
Andre Guedescef952c2014-02-26 20:21:49 -03003588 switch (auto_connect) {
3589 case HCI_AUTO_CONN_DISABLED:
3590 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003591 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003592 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003593 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003594 list_add(&params->action, &hdev->pend_le_reports);
3595 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003596 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003597 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003598 if (!is_connected(hdev, addr, addr_type)) {
3599 list_add(&params->action, &hdev->pend_le_conns);
3600 hci_update_background_scan(hdev);
3601 }
Andre Guedescef952c2014-02-26 20:21:49 -03003602 break;
3603 }
Andre Guedes15819a72014-02-03 13:56:18 -03003604
Johan Hedberg851efca2014-07-02 22:42:00 +03003605 params->auto_connect = auto_connect;
3606
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003607 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3608 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003609
3610 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003611}
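
/* Usage sketch (illustrative; example_* is hypothetical): requesting
 * automatic reconnection to a known LE device. On success the entry
 * lands on hdev->pend_le_conns and the background scan is updated, as
 * implemented above.
 */
static int __maybe_unused example_auto_connect(struct hci_dev *hdev,
					       bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}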
3612
3613/* This function requires the caller holds hdev->lock */
3614void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3615{
3616 struct hci_conn_params *params;
3617
3618 params = hci_conn_params_lookup(hdev, addr, addr_type);
3619 if (!params)
3620 return;
3621
Johan Hedberg95305ba2014-07-04 12:37:21 +03003622 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003623 list_del(&params->list);
3624 kfree(params);
3625
Johan Hedberg95305ba2014-07-04 12:37:21 +03003626 hci_update_background_scan(hdev);
3627
Andre Guedes15819a72014-02-03 13:56:18 -03003628 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3629}
3630
3631/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003632void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3633{
3634 struct hci_conn_params *params, *tmp;
3635
3636 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3637 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3638 continue;
3639 list_del(&params->list);
3640 kfree(params);
3641 }
3642
3643 BT_DBG("All LE disabled connection parameters were removed");
3644}
3645
3646/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003647void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003648{
3649 struct hci_conn_params *params, *tmp;
3650
3651 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003652 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003653 list_del(&params->list);
3654 kfree(params);
3655 }
3656
Johan Hedberga2f41a82014-07-04 12:37:19 +03003657 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003658
Andre Guedes15819a72014-02-03 13:56:18 -03003659 BT_DBG("All LE connection parameters were removed");
3660}
3661
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003662static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003663{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003664 if (status) {
3665 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003666
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003667 hci_dev_lock(hdev);
3668 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3669 hci_dev_unlock(hdev);
3670 return;
3671 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003672}
3673
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003674static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003675{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003676 /* General inquiry access code (GIAC) */
3677 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3678 struct hci_request req;
3679 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003680 int err;
3681
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003682 if (status) {
3683 BT_ERR("Failed to disable LE scanning: status %d", status);
3684 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003685 }
3686
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003687 switch (hdev->discovery.type) {
3688 case DISCOV_TYPE_LE:
3689 hci_dev_lock(hdev);
3690 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3691 hci_dev_unlock(hdev);
3692 break;
3693
3694 case DISCOV_TYPE_INTERLEAVED:
3695 hci_req_init(&req, hdev);
3696
3697 memset(&cp, 0, sizeof(cp));
3698 memcpy(&cp.lap, lap, sizeof(cp.lap));
3699 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3700 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3701
3702 hci_dev_lock(hdev);
3703
3704 hci_inquiry_cache_flush(hdev);
3705
3706 err = hci_req_run(&req, inquiry_complete);
3707 if (err) {
3708 BT_ERR("Inquiry request failed: err %d", err);
3709 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3710 }
3711
3712 hci_dev_unlock(hdev);
3713 break;
3714 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003715}
3716
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003717static void le_scan_disable_work(struct work_struct *work)
3718{
3719 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003720 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003721 struct hci_request req;
3722 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003723
3724 BT_DBG("%s", hdev->name);
3725
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003726 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003727
Andre Guedesb1efcc22014-02-26 20:21:40 -03003728 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003729
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003730 err = hci_req_run(&req, le_scan_disable_work_complete);
3731 if (err)
3732 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003733}
3734
Johan Hedberg8d972502014-02-28 12:54:14 +02003735static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3736{
3737 struct hci_dev *hdev = req->hdev;
3738
3739 /* If we're advertising or initiating an LE connection we can't
3740 * go ahead and change the random address at this time. This is
3741 * because the eventual initiator address used for the
3742 * subsequently created connection will be undefined (some
3743 * controllers use the new address and others the one we had
3744 * when the operation started).
3745 *
3746 * In this kind of scenario skip the update and let the random
3747 * address be updated at the next cycle.
3748 */
3749 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3750 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3751 BT_DBG("Deferring random address update");
3752 return;
3753 }
3754
3755 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3756}
3757
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003758int hci_update_random_address(struct hci_request *req, bool require_privacy,
3759 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003760{
3761 struct hci_dev *hdev = req->hdev;
3762 int err;
3763
3764 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003765 * the current RPA has expired or something other than the
3766 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003767 */
3768 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003769 int to;
3770
3771 *own_addr_type = ADDR_LE_DEV_RANDOM;
3772
3773 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003774 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003775 return 0;
3776
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003777 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003778 if (err < 0) {
3779 BT_ERR("%s failed to generate new RPA", hdev->name);
3780 return err;
3781 }
3782
Johan Hedberg8d972502014-02-28 12:54:14 +02003783 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003784
3785 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3786 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3787
3788 return 0;
3789 }
3790
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003791 /* If privacy is required but a resolvable private address is not
3792 * available, use an unresolvable private address. This is useful
3793 * for active scanning and non-connectable advertising.
3794 */
3795 if (require_privacy) {
3796 bdaddr_t urpa;
3797
3798 get_random_bytes(&urpa, 6);
3799 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3800
3801 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003802 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003803 return 0;
3804 }
3805
Johan Hedbergebd3a742014-02-23 19:42:21 +02003806 /* If forcing of the static address is in use or there is no
3807 * public address, use the static address as the random address
3808 * (but skip the HCI command if the current random address is
3809 * already the static one).
3810 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003811 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003812 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3813 *own_addr_type = ADDR_LE_DEV_RANDOM;
3814 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3815 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3816 &hdev->static_addr);
3817 return 0;
3818 }
3819
3820 /* Neither privacy nor static address is being used so use a
3821 * public address.
3822 */
3823 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3824
3825 return 0;
3826}
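
/* Usage sketch (illustrative, loosely modelled on the scan setup
 * callers; example_* is hypothetical): picking an own address type
 * before enabling active scanning. Active scanning discloses the
 * scanner address in SCAN_REQ packets, hence privacy is requested.
 */
static void __maybe_unused example_add_scan_param(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	if (hci_update_random_address(req, true, &own_addr_type) < 0)
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);
}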
3827
Johan Hedberga1f4c312014-02-27 14:05:41 +02003828/* Copy the Identity Address of the controller.
3829 *
3830 * If the controller has a public BD_ADDR, then by default use that one.
3831 * If this is a LE only controller without a public address, default to
3832 * the static random address.
3833 *
3834 * For debugging purposes it is possible to force controllers with a
3835 * public address to use the static random address instead.
3836 */
3837void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3838 u8 *bdaddr_type)
3839{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003840 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003841 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3842 bacpy(bdaddr, &hdev->static_addr);
3843 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3844 } else {
3845 bacpy(bdaddr, &hdev->bdaddr);
3846 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3847 }
3848}
3849
David Herrmann9be0dab2012-04-22 14:39:57 +02003850/* Alloc HCI device */
3851struct hci_dev *hci_alloc_dev(void)
3852{
3853 struct hci_dev *hdev;
3854
3855 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3856 if (!hdev)
3857 return NULL;
3858
David Herrmannb1b813d2012-04-22 14:39:58 +02003859 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3860 hdev->esco_type = (ESCO_HV1);
3861 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003862 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3863 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003864 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003865 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3866 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003867
David Herrmannb1b813d2012-04-22 14:39:58 +02003868 hdev->sniff_max_interval = 800;
3869 hdev->sniff_min_interval = 80;
3870
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003871 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003872 hdev->le_scan_interval = 0x0060;
3873 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003874 hdev->le_conn_min_interval = 0x0028;
3875 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003876 hdev->le_conn_latency = 0x0000;
3877 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003878
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003879 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003880 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003881 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3882 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003883
David Herrmannb1b813d2012-04-22 14:39:58 +02003884 mutex_init(&hdev->lock);
3885 mutex_init(&hdev->req_lock);
3886
3887 INIT_LIST_HEAD(&hdev->mgmt_pending);
3888 INIT_LIST_HEAD(&hdev->blacklist);
3889 INIT_LIST_HEAD(&hdev->uuids);
3890 INIT_LIST_HEAD(&hdev->link_keys);
3891 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003892 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003893 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003894 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003895 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003896 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003897 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003898 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003899
3900 INIT_WORK(&hdev->rx_work, hci_rx_work);
3901 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3902 INIT_WORK(&hdev->tx_work, hci_tx_work);
3903 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003904
David Herrmannb1b813d2012-04-22 14:39:58 +02003905 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3906 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3907 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3908
David Herrmannb1b813d2012-04-22 14:39:58 +02003909 skb_queue_head_init(&hdev->rx_q);
3910 skb_queue_head_init(&hdev->cmd_q);
3911 skb_queue_head_init(&hdev->raw_q);
3912
3913 init_waitqueue_head(&hdev->req_wait_q);
3914
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003915 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003916
David Herrmannb1b813d2012-04-22 14:39:58 +02003917 hci_init_sysfs(hdev);
3918 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003919
3920 return hdev;
3921}
3922EXPORT_SYMBOL(hci_alloc_dev);
3923
3924/* Free HCI device */
3925void hci_free_dev(struct hci_dev *hdev)
3926{
David Herrmann9be0dab2012-04-22 14:39:57 +02003927 /* will free via device release */
3928 put_device(&hdev->dev);
3929}
3930EXPORT_SYMBOL(hci_free_dev);
3931
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932/* Register HCI device */
3933int hci_register_dev(struct hci_dev *hdev)
3934{
David Herrmannb1b813d2012-04-22 14:39:58 +02003935 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936
Marcel Holtmann74292d52014-07-06 15:50:27 +02003937 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 return -EINVAL;
3939
Mat Martineau08add512011-11-02 16:18:36 -07003940 /* Do not allow HCI_AMP devices to register at index 0,
3941 * so the index can be used as the AMP controller ID.
3942 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003943 switch (hdev->dev_type) {
3944 case HCI_BREDR:
3945 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3946 break;
3947 case HCI_AMP:
3948 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3949 break;
3950 default:
3951 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003953
Sasha Levin3df92b32012-05-27 22:36:56 +02003954 if (id < 0)
3955 return id;
3956
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 sprintf(hdev->name, "hci%d", id);
3958 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003959
3960 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3961
Kees Cookd8537542013-07-03 15:04:57 -07003962 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3963 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003964 if (!hdev->workqueue) {
3965 error = -ENOMEM;
3966 goto err;
3967 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003968
Kees Cookd8537542013-07-03 15:04:57 -07003969 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3970 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003971 if (!hdev->req_workqueue) {
3972 destroy_workqueue(hdev->workqueue);
3973 error = -ENOMEM;
3974 goto err;
3975 }
3976
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003977 if (!IS_ERR_OR_NULL(bt_debugfs))
3978 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3979
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003980 dev_set_name(&hdev->dev, "%s", hdev->name);
3981
Johan Hedberg99780a72014-02-18 10:40:07 +02003982 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3983 CRYPTO_ALG_ASYNC);
3984 if (IS_ERR(hdev->tfm_aes)) {
3985 BT_ERR("Unable to create crypto context");
3986 error = PTR_ERR(hdev->tfm_aes);
3987 hdev->tfm_aes = NULL;
3988 goto err_wqueue;
3989 }
3990
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003991 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003992 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003993 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003995 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003996 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3997 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003998 if (hdev->rfkill) {
3999 if (rfkill_register(hdev->rfkill) < 0) {
4000 rfkill_destroy(hdev->rfkill);
4001 hdev->rfkill = NULL;
4002 }
4003 }
4004
Johan Hedberg5e130362013-09-13 08:58:17 +03004005 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4006 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4007
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004008 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004009 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004010
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004011 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004012 /* Assume BR/EDR support until proven otherwise (such as
4013 * through reading supported features during init.
4014 */
4015 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4016 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004017
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004018 write_lock(&hci_dev_list_lock);
4019 list_add(&hdev->list, &hci_dev_list);
4020 write_unlock(&hci_dev_list_lock);
4021
Marcel Holtmann4a964402014-07-02 19:10:33 +02004022 /* Devices that are marked for raw-only usage are unconfigured
4023 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004024 */
4025 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004026 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004027
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004029 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004030
Johan Hedberg19202572013-01-14 22:33:51 +02004031 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004032
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004034
Johan Hedberg99780a72014-02-18 10:40:07 +02004035err_tfm:
4036 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004037err_wqueue:
4038 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004039 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004040err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004041 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004042
David Herrmann33ca9542011-10-08 14:58:49 +02004043 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044}
4045EXPORT_SYMBOL(hci_register_dev);
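
/* Driver-side sketch (hypothetical transport driver, for illustration;
 * all example_* names are made up): the minimal registration sequence.
 * open, close and send are the mandatory callbacks checked at the top
 * of hci_register_dev().
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int __maybe_unused example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->dev_type = HCI_BREDR;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}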
4046
4047/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004048void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049{
Sasha Levin3df92b32012-05-27 22:36:56 +02004050 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004051
	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list.
	 */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
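
/* Illustrative sketch, not part of the original file: a transport
 * driver would typically forward its bus suspend/resume callbacks to
 * the two helpers above so that monitor and socket users see the
 * state change. The USB wrapper below is an assumption (my_data and
 * my_suspend are made-up names).
 *
 *	static int my_suspend(struct usb_interface *intf, pm_message_t msg)
 *	{
 *		struct my_data *data = usb_get_intfdata(intf);
 *
 *		return hci_suspend_dev(data->hdev);
 *	}
 */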

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
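
/* Usage sketch (illustrative only): a driver that already holds one
 * complete HCI packet hands it to the core like this. bt_skb_alloc(),
 * bt_cb() and hci_recv_frame() are the real APIs used in this file;
 * my_deliver_event() is a hypothetical helper.
 *
 *	static int my_deliver_event(struct hci_dev *hdev, const void *buf,
 *				    int len)
 *	{
 *		struct sk_buff *skb;
 *
 *		skb = bt_skb_alloc(len, GFP_ATOMIC);
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		memcpy(skb_put(skb, len), buf, len);
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		return hci_recv_frame(hdev, skb);
 *	}
 */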

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
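
/* Usage sketch (illustrative only): a driver whose bus delivers an
 * HCI packet in arbitrary chunks feeds every chunk to
 * hci_recv_fragment() together with the packet type it learned out of
 * band. Complete frames are pushed to hci_recv_frame() internally and
 * a negative return value signals a malformed stream.
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s corrupted ACL fragment", hdev->name);
 */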

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
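
/* Illustrative sketch (hypothetical names): an upper protocol hooks
 * into connection-level events by registering a struct hci_cb; the
 * real users of this interface are the L2CAP and SCO layers. The
 * exact set of callback members comes from hci_core.h, so treat the
 * initializer below as an assumption.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.auth_cfm	= my_proto_auth_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */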
4341
Marcel Holtmann51086992013-10-10 14:54:19 -07004342static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004344 int err;
4345
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004346 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004347
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004348 /* Time stamp */
4349 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004351 /* Send copy to monitor */
4352 hci_send_to_monitor(hdev, skb);
4353
4354 if (atomic_read(&hdev->promisc)) {
4355 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004356 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357 }
4358
4359 /* Get rid of skb owner, prior to sending to the driver. */
4360 skb_orphan(skb);
4361
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004362 err = hdev->send(hdev, skb);
4363 if (err < 0) {
4364 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4365 kfree_skb(skb);
4366 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367}
4368
Johan Hedberg3119ae92013-03-05 20:37:44 +02004369void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4370{
4371 skb_queue_head_init(&req->cmd_q);
4372 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004373 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004374}
4375
4376int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4377{
4378 struct hci_dev *hdev = req->hdev;
4379 struct sk_buff *skb;
4380 unsigned long flags;
4381
4382 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4383
Andre Guedes5d73e032013-03-08 11:20:16 -03004384 /* If an error occured during request building, remove all HCI
4385 * commands queued on the HCI request queue.
4386 */
4387 if (req->err) {
4388 skb_queue_purge(&req->cmd_q);
4389 return req->err;
4390 }
4391
Johan Hedberg3119ae92013-03-05 20:37:44 +02004392 /* Do not allow empty requests */
4393 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004394 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004395
4396 skb = skb_peek_tail(&req->cmd_q);
4397 bt_cb(skb)->req.complete = complete;
4398
4399 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4400 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4401 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4402
4403 queue_work(hdev->workqueue, &hdev->cmd_work);
4404
4405 return 0;
4406}
4407
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004408static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004409 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410{
4411 int len = HCI_COMMAND_HDR_SIZE + plen;
4412 struct hci_command_hdr *hdr;
4413 struct sk_buff *skb;
4414
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004416 if (!skb)
4417 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418
4419 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004420 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 hdr->plen = plen;
4422
4423 if (plen)
4424 memcpy(skb_put(skb, plen), param, plen);
4425
4426 BT_DBG("skb len %d", skb->len);
4427
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004428 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004429
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004430 return skb;
4431}
4432
4433/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004434int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4435 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004436{
4437 struct sk_buff *skb;
4438
4439 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4440
4441 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4442 if (!skb) {
4443 BT_ERR("%s no memory for command", hdev->name);
4444 return -ENOMEM;
4445 }
4446
Johan Hedberg11714b32013-03-05 20:37:47 +02004447 /* Stand-alone HCI commands must be flaged as
4448 * single-command requests.
4449 */
4450 bt_cb(skb)->req.start = true;
4451
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004453 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454
4455 return 0;
4456}
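
/* Usage sketch (illustrative only): queueing a single stand-alone
 * command. The opcode and parameter struct are real definitions from
 * hci.h; completion is reported asynchronously through the event
 * handler, so there is nothing to wait on here.
 *
 *	struct hci_cp_write_le_host_supported cp = { 0x01, 0x00 };
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
 *		     sizeof(cp), &cp);
 */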

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
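
/* Usage sketch (illustrative only): building and running a request.
 * Allocation errors accumulate in req->err, so a single check at
 * hci_req_run() time is enough; my_complete() stands in for a real
 * hci_req_complete_t callback.
 *
 *	struct hci_request req;
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete);
 */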

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
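
/* Worked example (informational): hci_handle_pack() folds the 12-bit
 * connection handle and the 4-bit packet boundary/broadcast flags into
 * a single 16-bit field. Handle 0x002a with ACL_START (0x02) becomes
 * 0x202a, transmitted little endian as 2a 20.
 */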

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
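
/* Informational note (not in the original): for a fragmented skb the
 * head keeps the caller's boundary flags (typically ACL_START or
 * ACL_START_NO_FLUSH) while every buffer on the frag_list is rewritten
 * to ACL_CONT above. That is exactly the packet boundary sequence the
 * controller expects for one L2CAP PDU split across several ACL
 * packets.
 */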

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled.
	 */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
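
/* Worked example (informational): with hdev->acl_cnt == 8 free
 * controller buffers and num == 3 eligible ACL connections, the
 * selected connection (the one with the lowest ->sent count) gets a
 * quote of 8 / 3 = 2 packets; a quote of 0 is rounded up to 1 so a
 * connection with queued data always makes progress.
 */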

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
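
/* Worked example (informational): with block-based flow control a
 * 672-byte payload (skb->len == 676 including the 4-byte ACL header)
 * on a controller advertising hdev->block_len == 256 consumes
 * DIV_ROUND_UP(672, 256) = 3 buffer blocks.
 */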

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds).
		 */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
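
/* Informational note (not in the original): the flow control mode is
 * a property of the controller. BR/EDR controllers use packet-based
 * accounting (one credit per ACL packet) while AMP controllers use
 * block-based accounting, where one packet consumes __get_blocks()
 * credits; the dispatch above simply picks the matching scheduler.
 */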

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds).
		 */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
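
/* Informational example (not in the original): with two requests
 * queued the command queue looks like
 *
 *	cmd_q: [A1 start][A2][A3][B1 start][B2]
 *
 * On failure of A1 or A2, commands are dequeued above until the next
 * skb marked req.start (B1) is reached, so request B stays intact and
 * A's complete callback runs exactly once.
 */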

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
		       status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}