blob: fc7abd3c012dfeb1da454211599e9960eb87cea8 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
/* Work handlers for RX, command and TX processing; defined later in
 * this file and scheduled on the hdev workqueues.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
Linus Torvalds1da177e2005-04-16 15:20:36 -070057/* ---- HCI notifications ---- */
58
/* Forward a device event (register/unregister/up/down) to the HCI
 * socket layer so monitoring sockets get informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
63
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070064/* ---- HCI debugfs entries ---- */
65
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070066static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68{
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
Marcel Holtmann111902f2014-06-21 04:53:17 +020072 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070073 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76}
77
78static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80{
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 char buf[32];
84 size_t buf_size = min(count, (sizeof(buf)-1));
85 bool enable;
86 int err;
87
88 if (!test_bit(HCI_UP, &hdev->flags))
89 return -ENETDOWN;
90
91 if (copy_from_user(buf, user_buf, buf_size))
92 return -EFAULT;
93
94 buf[buf_size] = '\0';
95 if (strtobool(buf, &enable))
96 return -EINVAL;
97
Marcel Holtmann111902f2014-06-21 04:53:17 +020098 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070099 return -EALREADY;
100
101 hci_req_lock(hdev);
102 if (enable)
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 HCI_CMD_TIMEOUT);
105 else
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 hci_req_unlock(hdev);
109
110 if (IS_ERR(skb))
111 return PTR_ERR(skb);
112
113 err = -bt_to_errno(skb->data[0]);
114 kfree_skb(skb);
115
116 if (err < 0)
117 return err;
118
Marcel Holtmann111902f2014-06-21 04:53:17 +0200119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700120
121 return count;
122}
123
124static const struct file_operations dut_mode_fops = {
125 .open = simple_open,
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
129};
130
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700131static int features_show(struct seq_file *f, void *ptr)
132{
133 struct hci_dev *hdev = f->private;
134 u8 p;
135
136 hci_dev_lock(hdev);
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
144 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700152 hci_dev_unlock(hdev);
153
154 return 0;
155}
156
157static int features_open(struct inode *inode, struct file *file)
158{
159 return single_open(file, features_show, inode->i_private);
160}
161
162static const struct file_operations features_fops = {
163 .open = features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167};
168
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700169static int blacklist_show(struct seq_file *f, void *p)
170{
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
173
174 hci_dev_lock(hdev);
175 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700177 hci_dev_unlock(hdev);
178
179 return 0;
180}
181
182static int blacklist_open(struct inode *inode, struct file *file)
183{
184 return single_open(file, blacklist_show, inode->i_private);
185}
186
187static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
189 .read = seq_read,
190 .llseek = seq_lseek,
191 .release = single_release,
192};
193
Marcel Holtmann47219832013-10-17 17:24:15 -0700194static int uuids_show(struct seq_file *f, void *p)
195{
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
198
199 hci_dev_lock(hdev);
200 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700201 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700202
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
206 */
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700209
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700211 }
212 hci_dev_unlock(hdev);
213
214 return 0;
215}
216
217static int uuids_open(struct inode *inode, struct file *file)
218{
219 return single_open(file, uuids_show, inode->i_private);
220}
221
222static const struct file_operations uuids_fops = {
223 .open = uuids_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = single_release,
227};
228
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700229static int inquiry_cache_show(struct seq_file *f, void *p)
230{
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
234
235 hci_dev_lock(hdev);
236
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 &data->bdaddr,
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
246 }
247
248 hci_dev_unlock(hdev);
249
250 return 0;
251}
252
253static int inquiry_cache_open(struct inode *inode, struct file *file)
254{
255 return single_open(file, inquiry_cache_show, inode->i_private);
256}
257
258static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
260 .read = seq_read,
261 .llseek = seq_lseek,
262 .release = single_release,
263};
264
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700265static int link_keys_show(struct seq_file *f, void *ptr)
266{
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
269
270 hci_dev_lock(hdev);
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 }
276 hci_dev_unlock(hdev);
277
278 return 0;
279}
280
281static int link_keys_open(struct inode *inode, struct file *file)
282{
283 return single_open(file, link_keys_show, inode->i_private);
284}
285
286static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
288 .read = seq_read,
289 .llseek = seq_lseek,
290 .release = single_release,
291};
292
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700293static int dev_class_show(struct seq_file *f, void *ptr)
294{
295 struct hci_dev *hdev = f->private;
296
297 hci_dev_lock(hdev);
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
301
302 return 0;
303}
304
305static int dev_class_open(struct inode *inode, struct file *file)
306{
307 return single_open(file, dev_class_show, inode->i_private);
308}
309
310static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
312 .read = seq_read,
313 .llseek = seq_lseek,
314 .release = single_release,
315};
316
Marcel Holtmann041000b2013-10-17 12:02:31 -0700317static int voice_setting_get(void *data, u64 *val)
318{
319 struct hci_dev *hdev = data;
320
321 hci_dev_lock(hdev);
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
324
325 return 0;
326}
327
328DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
330
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700331static int auto_accept_delay_set(void *data, u64 val)
332{
333 struct hci_dev *hdev = data;
334
335 hci_dev_lock(hdev);
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
338
339 return 0;
340}
341
342static int auto_accept_delay_get(void *data, u64 *val)
343{
344 struct hci_dev *hdev = data;
345
346 hci_dev_lock(hdev);
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
349
350 return 0;
351}
352
353DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
355
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800356static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
358{
359 struct hci_dev *hdev = file->private_data;
360 char buf[3];
361
Marcel Holtmann111902f2014-06-21 04:53:17 +0200362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800363 buf[1] = '\n';
364 buf[2] = '\0';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
366}
367
368static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
371{
372 struct hci_dev *hdev = file->private_data;
373 char buf[32];
374 size_t buf_size = min(count, (sizeof(buf)-1));
375 bool enable;
376
377 if (test_bit(HCI_UP, &hdev->flags))
378 return -EBUSY;
379
380 if (copy_from_user(buf, user_buf, buf_size))
381 return -EFAULT;
382
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
385 return -EINVAL;
386
Marcel Holtmann111902f2014-06-21 04:53:17 +0200387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800388 return -EALREADY;
389
Marcel Holtmann111902f2014-06-21 04:53:17 +0200390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800391
392 return count;
393}
394
395static const struct file_operations force_sc_support_fops = {
396 .open = simple_open,
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
400};
401
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800402static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
404{
405 struct hci_dev *hdev = file->private_data;
406 char buf[3];
407
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
409 buf[1] = '\n';
410 buf[2] = '\0';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
412}
413
414static const struct file_operations sc_only_mode_fops = {
415 .open = simple_open,
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
418};
419
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700420static int idle_timeout_set(void *data, u64 val)
421{
422 struct hci_dev *hdev = data;
423
424 if (val != 0 && (val < 500 || val > 3600000))
425 return -EINVAL;
426
427 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700428 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700429 hci_dev_unlock(hdev);
430
431 return 0;
432}
433
434static int idle_timeout_get(void *data, u64 *val)
435{
436 struct hci_dev *hdev = data;
437
438 hci_dev_lock(hdev);
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
441
442 return 0;
443}
444
445DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
447
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200448static int rpa_timeout_set(void *data, u64 val)
449{
450 struct hci_dev *hdev = data;
451
452 /* Require the RPA timeout to be at least 30 seconds and at most
453 * 24 hours.
454 */
455 if (val < 30 || val > (60 * 60 * 24))
456 return -EINVAL;
457
458 hci_dev_lock(hdev);
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
461
462 return 0;
463}
464
465static int rpa_timeout_get(void *data, u64 *val)
466{
467 struct hci_dev *hdev = data;
468
469 hci_dev_lock(hdev);
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
472
473 return 0;
474}
475
476DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
478
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700479static int sniff_min_interval_set(void *data, u64 val)
480{
481 struct hci_dev *hdev = data;
482
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484 return -EINVAL;
485
486 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700487 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493static int sniff_min_interval_get(void *data, u64 *val)
494{
495 struct hci_dev *hdev = data;
496
497 hci_dev_lock(hdev);
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
500
501 return 0;
502}
503
504DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
506
507static int sniff_max_interval_set(void *data, u64 val)
508{
509 struct hci_dev *hdev = data;
510
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512 return -EINVAL;
513
514 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700515 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700516 hci_dev_unlock(hdev);
517
518 return 0;
519}
520
521static int sniff_max_interval_get(void *data, u64 *val)
522{
523 struct hci_dev *hdev = data;
524
525 hci_dev_lock(hdev);
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
528
529 return 0;
530}
531
532DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
534
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200535static int conn_info_min_age_set(void *data, u64 val)
536{
537 struct hci_dev *hdev = data;
538
539 if (val == 0 || val > hdev->conn_info_max_age)
540 return -EINVAL;
541
542 hci_dev_lock(hdev);
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
545
546 return 0;
547}
548
549static int conn_info_min_age_get(void *data, u64 *val)
550{
551 struct hci_dev *hdev = data;
552
553 hci_dev_lock(hdev);
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
556
557 return 0;
558}
559
560DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
562
563static int conn_info_max_age_set(void *data, u64 val)
564{
565 struct hci_dev *hdev = data;
566
567 if (val == 0 || val < hdev->conn_info_min_age)
568 return -EINVAL;
569
570 hci_dev_lock(hdev);
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
573
574 return 0;
575}
576
577static int conn_info_max_age_get(void *data, u64 *val)
578{
579 struct hci_dev *hdev = data;
580
581 hci_dev_lock(hdev);
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
584
585 return 0;
586}
587
588DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
590
Marcel Holtmannac345812014-02-23 12:44:25 -0800591static int identity_show(struct seq_file *f, void *p)
592{
593 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200594 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800595 u8 addr_type;
596
597 hci_dev_lock(hdev);
598
Johan Hedberga1f4c312014-02-27 14:05:41 +0200599 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800600
Johan Hedberga1f4c312014-02-27 14:05:41 +0200601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800602 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800603
604 hci_dev_unlock(hdev);
605
606 return 0;
607}
608
609static int identity_open(struct inode *inode, struct file *file)
610{
611 return single_open(file, identity_show, inode->i_private);
612}
613
614static const struct file_operations identity_fops = {
615 .open = identity_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
619};
620
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800621static int random_address_show(struct seq_file *f, void *p)
622{
623 struct hci_dev *hdev = f->private;
624
625 hci_dev_lock(hdev);
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
628
629 return 0;
630}
631
632static int random_address_open(struct inode *inode, struct file *file)
633{
634 return single_open(file, random_address_show, inode->i_private);
635}
636
637static const struct file_operations random_address_fops = {
638 .open = random_address_open,
639 .read = seq_read,
640 .llseek = seq_lseek,
641 .release = single_release,
642};
643
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700644static int static_address_show(struct seq_file *f, void *p)
645{
646 struct hci_dev *hdev = f->private;
647
648 hci_dev_lock(hdev);
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
651
652 return 0;
653}
654
655static int static_address_open(struct inode *inode, struct file *file)
656{
657 return single_open(file, static_address_show, inode->i_private);
658}
659
660static const struct file_operations static_address_fops = {
661 .open = static_address_open,
662 .read = seq_read,
663 .llseek = seq_lseek,
664 .release = single_release,
665};
666
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800667static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700670{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800671 struct hci_dev *hdev = file->private_data;
672 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700673
Marcel Holtmann111902f2014-06-21 04:53:17 +0200674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800675 buf[1] = '\n';
676 buf[2] = '\0';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
678}
679
680static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
683{
684 struct hci_dev *hdev = file->private_data;
685 char buf[32];
686 size_t buf_size = min(count, (sizeof(buf)-1));
687 bool enable;
688
689 if (test_bit(HCI_UP, &hdev->flags))
690 return -EBUSY;
691
692 if (copy_from_user(buf, user_buf, buf_size))
693 return -EFAULT;
694
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700697 return -EINVAL;
698
Marcel Holtmann111902f2014-06-21 04:53:17 +0200699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800700 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700701
Marcel Holtmann111902f2014-06-21 04:53:17 +0200702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800703
704 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700705}
706
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800707static const struct file_operations force_static_address_fops = {
708 .open = simple_open,
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
712};
Marcel Holtmann92202182013-10-18 16:38:10 -0700713
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800714static int white_list_show(struct seq_file *f, void *ptr)
715{
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
718
719 hci_dev_lock(hdev);
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
723
724 return 0;
725}
726
727static int white_list_open(struct inode *inode, struct file *file)
728{
729 return single_open(file, white_list_show, inode->i_private);
730}
731
732static const struct file_operations white_list_fops = {
733 .open = white_list_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
737};
738
Marcel Holtmann3698d702014-02-18 21:54:49 -0800739static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740{
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
743
744 hci_dev_lock(hdev);
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
750 }
751 hci_dev_unlock(hdev);
752
753 return 0;
754}
755
756static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757{
758 return single_open(file, identity_resolving_keys_show,
759 inode->i_private);
760}
761
762static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
764 .read = seq_read,
765 .llseek = seq_lseek,
766 .release = single_release,
767};
768
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700769static int long_term_keys_show(struct seq_file *f, void *ptr)
770{
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
773
774 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800775 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700778 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800780 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700781 }
782 hci_dev_unlock(hdev);
783
784 return 0;
785}
786
787static int long_term_keys_open(struct inode *inode, struct file *file)
788{
789 return single_open(file, long_term_keys_show, inode->i_private);
790}
791
792static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
794 .read = seq_read,
795 .llseek = seq_lseek,
796 .release = single_release,
797};
798
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700799static int conn_min_interval_set(void *data, u64 val)
800{
801 struct hci_dev *hdev = data;
802
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804 return -EINVAL;
805
806 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700807 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700808 hci_dev_unlock(hdev);
809
810 return 0;
811}
812
813static int conn_min_interval_get(void *data, u64 *val)
814{
815 struct hci_dev *hdev = data;
816
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
820
821 return 0;
822}
823
824DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
826
827static int conn_max_interval_set(void *data, u64 val)
828{
829 struct hci_dev *hdev = data;
830
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832 return -EINVAL;
833
834 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700835 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700836 hci_dev_unlock(hdev);
837
838 return 0;
839}
840
841static int conn_max_interval_get(void *data, u64 *val)
842{
843 struct hci_dev *hdev = data;
844
845 hci_dev_lock(hdev);
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
848
849 return 0;
850}
851
852DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
854
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200855static int conn_latency_set(void *data, u64 val)
856{
857 struct hci_dev *hdev = data;
858
859 if (val > 0x01f3)
860 return -EINVAL;
861
862 hci_dev_lock(hdev);
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
865
866 return 0;
867}
868
869static int conn_latency_get(void *data, u64 *val)
870{
871 struct hci_dev *hdev = data;
872
873 hci_dev_lock(hdev);
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
876
877 return 0;
878}
879
880DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
882
Marcel Holtmannf1649572014-06-30 12:34:38 +0200883static int supervision_timeout_set(void *data, u64 val)
884{
885 struct hci_dev *hdev = data;
886
887 if (val < 0x000a || val > 0x0c80)
888 return -EINVAL;
889
890 hci_dev_lock(hdev);
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
893
894 return 0;
895}
896
897static int supervision_timeout_get(void *data, u64 *val)
898{
899 struct hci_dev *hdev = data;
900
901 hci_dev_lock(hdev);
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
904
905 return 0;
906}
907
908DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
910
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800911static int adv_channel_map_set(void *data, u64 val)
912{
913 struct hci_dev *hdev = data;
914
915 if (val < 0x01 || val > 0x07)
916 return -EINVAL;
917
918 hci_dev_lock(hdev);
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
921
922 return 0;
923}
924
925static int adv_channel_map_get(void *data, u64 *val)
926{
927 struct hci_dev *hdev = data;
928
929 hci_dev_lock(hdev);
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
932
933 return 0;
934}
935
936DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
938
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200939static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -0300940{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200941 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -0300942 struct hci_conn_params *p;
943
944 hci_dev_lock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300945 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -0300947 p->auto_connect);
948 }
Andre Guedes7d474e02014-02-26 20:21:54 -0300949 hci_dev_unlock(hdev);
950
951 return 0;
952}
953
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200954static int device_list_open(struct inode *inode, struct file *file)
Andre Guedes7d474e02014-02-26 20:21:54 -0300955{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200956 return single_open(file, device_list_show, inode->i_private);
Andre Guedes7d474e02014-02-26 20:21:54 -0300957}
958
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200959static const struct file_operations device_list_fops = {
960 .open = device_list_open,
Andre Guedes7d474e02014-02-26 20:21:54 -0300961 .read = seq_read,
Andre Guedes7d474e02014-02-26 20:21:54 -0300962 .llseek = seq_lseek,
963 .release = single_release,
964};
965
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966/* ---- HCI requests ---- */
967
Johan Hedberg42c6b122013-03-05 20:37:49 +0200968static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
976 }
977}
978
979static void hci_req_cancel(struct hci_dev *hdev, int err)
980{
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
982
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
987 }
988}
989
/* Retrieve and validate the last received event for a synchronous
 * command.
 *
 * Takes ownership of hdev->recv_evt (cleared under the device lock so
 * the event handler cannot free it concurrently). On success the skb
 * is returned with its headers pulled: past the event header when a
 * specific @event was requested, or past the command complete header
 * (positioned at the return parameters) otherwise. On any mismatch or
 * malformed event the skb is freed and ERR_PTR(-ENODATA) is returned.
 *
 * @hdev:   the HCI device the command was sent on
 * @opcode: opcode the command complete event must carry
 * @event:  if non-zero, expect this raw event instead of command complete
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event; we now own the skb */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event (e.g. command status);
	 * hand the skb back as soon as the event code matches.
	 */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1044
/* Send a single HCI command and sleep until it completes.
 *
 * @event:   if non-zero, the completion is signalled by this specific
 *           event instead of the generic Command Complete
 * @timeout: maximum wait, in jiffies
 *
 * Returns the resulting event skb or an ERR_PTR() on failure
 * (-EINTR on signal, -ETIMEDOUT when no completion arrived).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Mark the request pending before running it so the completion
	 * callback (hci_req_sync_complete) sees a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until woken by the completion callback or the timeout */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Convert the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still pending: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1098
1099struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001100 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001101{
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001103}
1104EXPORT_SYMBOL(__hci_cmd_sync);
1105
/* Execute request and wait for completion.
 *
 * @func:    callback that queues the HCI commands making up the request
 * @opt:     opaque argument passed through to @func
 * @timeout: maximum wait, in jiffies
 *
 * NOTE(review): callers appear to serialize via hci_req_lock() (see
 * hci_req_sync() below) — confirm before adding new call sites.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark pending before queueing so the completion callback sees it */
	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its HCI commands onto the request */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Sleep until hci_req_sync_complete()/hci_req_cancel() wakes us
	 * or the timeout elapses.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Convert the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still pending: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1169
Johan Hedberg01178cd2013-03-05 20:37:41 +02001170static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001171 void (*req)(struct hci_request *req,
1172 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001173 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174{
1175 int ret;
1176
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001177 if (!test_bit(HCI_UP, &hdev->flags))
1178 return -ENETDOWN;
1179
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 /* Serialize all requests */
1181 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001182 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 hci_req_unlock(hdev);
1184
1185 return ret;
1186}
1187
Johan Hedberg42c6b122013-03-05 20:37:49 +02001188static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001190 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
1192 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195}
1196
Johan Hedberg42c6b122013-03-05 20:37:49 +02001197static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001200
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001204 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001206
1207 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209}
1210
Johan Hedberg42c6b122013-03-05 20:37:49 +02001211static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001212{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001214
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001215 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001217
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1220
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1223
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001224 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001226
1227 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001229
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1232
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001235}
1236
Johan Hedberg42c6b122013-03-05 20:37:49 +02001237static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001238{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001239 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001240
1241 BT_DBG("%s %ld", hdev->name, opt);
1242
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001243 /* Reset */
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001245 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001246
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001247 switch (hdev->dev_type) {
1248 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001249 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001250 break;
1251
1252 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001253 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001254 break;
1255
1256 default:
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1258 break;
1259 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001260}
1261
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001263{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001264 struct hci_dev *hdev = req->hdev;
1265
Johan Hedberg2177bab2013-03-05 20:37:43 +02001266 __le16 param;
1267 __u8 flt_type;
1268
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001271
1272 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001274
1275 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001277
1278 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001280
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1283
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1286
Johan Hedberg2177bab2013-03-05 20:37:43 +02001287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001290
1291 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001292 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001294
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1297 */
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1301 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001302}
1303
/* Queue the LE specific second-stage initialization commands. */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1330
1331static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1332{
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1335
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1338
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1342
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1350 }
1351
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1355
1356 return 0x00;
1357}
1358
Johan Hedberg42c6b122013-03-05 20:37:49 +02001359static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001360{
1361 u8 mode;
1362
Johan Hedberg42c6b122013-03-05 20:37:49 +02001363 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001364
Johan Hedberg42c6b122013-03-05 20:37:49 +02001365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001366}
1367
/* Build and queue the page 1 HCI event mask, enabling only events
 * that match the controller's advertised capabilities.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01;	/* IO Capability Request */
		events[6] |= 0x02;	/* IO Capability Response */
		events[6] |= 0x04;	/* User Confirmation Request */
		events[6] |= 0x08;	/* User Passkey Request */
		events[6] |= 0x10;	/* Remote OOB Data Request */
		events[6] |= 0x20;	/* Simple Pairing Complete */
		events[7] |= 0x04;	/* User Passkey Notification */
		events[7] |= 0x08;	/* Keypress Notification */
		events[7] |= 0x10;	/* Remote Host Supported
					 * Features Notification
					 */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20;	/* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1441
/* Second initialization stage: transport specific setup (BR/EDR
 * and/or LE), event mask, SSP/EIR and authentication configuration.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stored EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1505
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523}
1524
Johan Hedberg42c6b122013-03-05 20:37:49 +02001525static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001526{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528 struct hci_cp_write_le_host_supported cp;
1529
Johan Hedbergc73eee92013-04-19 18:35:21 +03001530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544}
1545
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001546static void hci_set_event_mask_page_2(struct hci_request *req)
1547{
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1553 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001554 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1559 }
1560
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1563 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001564 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1569 }
1570
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1574
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576}
1577
/* Third initialization stage: stored link key cleanup, link policy,
 * LE event mask and extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1638
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001639static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640{
1641 struct hci_dev *hdev = req->hdev;
1642
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1646
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001647 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001648 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001650
1651 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001652 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1658 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001659}
1660
Johan Hedberg2177bab2013-03-05 20:37:43 +02001661static int __hci_init(struct hci_dev *hdev)
1662{
1663 int err;
1664
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1668
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1671 */
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1675 }
1676
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1678 * BR/EDR/LE type controllers. AMP controllers only need the
1679 * first stage init.
1680 */
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1683
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1687
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1691
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1695
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1698 */
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1701
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1716
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001726 }
1727
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001728 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001735 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001736
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1744 }
1745
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001746 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1755
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1759 */
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1764
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001789 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001790
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001791 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001792}
1793
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001794static void hci_init0_req(struct hci_request *req, unsigned long opt)
1795{
1796 struct hci_dev *hdev = req->hdev;
1797
1798 BT_DBG("%s %ld", hdev->name, opt);
1799
1800 /* Reset */
1801 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1802 hci_reset_req(req, 0);
1803
1804 /* Read Local Version */
1805 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1806
1807 /* Read BD Address */
1808 if (hdev->set_bdaddr)
1809 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1810}
1811
1812static int __hci_unconf_init(struct hci_dev *hdev)
1813{
1814 int err;
1815
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001816 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1817 return 0;
1818
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001819 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1820 if (err < 0)
1821 return err;
1822
1823 return 0;
1824}
1825
Johan Hedberg42c6b122013-03-05 20:37:49 +02001826static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827{
1828 __u8 scan = opt;
1829
Johan Hedberg42c6b122013-03-05 20:37:49 +02001830 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
1832 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001833 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834}
1835
Johan Hedberg42c6b122013-03-05 20:37:49 +02001836static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837{
1838 __u8 auth = opt;
1839
Johan Hedberg42c6b122013-03-05 20:37:49 +02001840 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841
1842 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001843 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844}
1845
Johan Hedberg42c6b122013-03-05 20:37:49 +02001846static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847{
1848 __u8 encrypt = opt;
1849
Johan Hedberg42c6b122013-03-05 20:37:49 +02001850 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001852 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001853 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001854}
1855
Johan Hedberg42c6b122013-03-05 20:37:49 +02001856static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001857{
1858 __le16 policy = cpu_to_le16(opt);
1859
Johan Hedberg42c6b122013-03-05 20:37:49 +02001860 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001861
1862 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001863 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001864}
1865
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001866/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 * Device is held on return. */
1868struct hci_dev *hci_dev_get(int index)
1869{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001870 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
1872 BT_DBG("%d", index);
1873
1874 if (index < 0)
1875 return NULL;
1876
1877 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001878 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 if (d->id == index) {
1880 hdev = hci_dev_hold(d);
1881 break;
1882 }
1883 }
1884 read_unlock(&hci_dev_list_lock);
1885 return hdev;
1886}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
1888/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001889
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001890bool hci_discovery_active(struct hci_dev *hdev)
1891{
1892 struct discovery_state *discov = &hdev->discovery;
1893
Andre Guedes6fbe1952012-02-03 17:47:58 -03001894 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001895 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001896 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001897 return true;
1898
Andre Guedes6fbe1952012-02-03 17:47:58 -03001899 default:
1900 return false;
1901 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001902}
1903
Johan Hedbergff9ef572012-01-04 14:23:45 +02001904void hci_discovery_set_state(struct hci_dev *hdev, int state)
1905{
1906 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1907
1908 if (hdev->discovery.state == state)
1909 return;
1910
1911 switch (state) {
1912 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001913 hci_update_background_scan(hdev);
1914
Andre Guedes7b99b652012-02-13 15:41:02 -03001915 if (hdev->discovery.state != DISCOVERY_STARTING)
1916 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001917 break;
1918 case DISCOVERY_STARTING:
1919 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001920 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001921 mgmt_discovering(hdev, 1);
1922 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001923 case DISCOVERY_RESOLVING:
1924 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001925 case DISCOVERY_STOPPING:
1926 break;
1927 }
1928
1929 hdev->discovery.state = state;
1930}
1931
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001932void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933{
Johan Hedberg30883512012-01-04 14:16:21 +02001934 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001935 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Johan Hedberg561aafb2012-01-04 13:31:59 +02001937 list_for_each_entry_safe(p, n, &cache->all, all) {
1938 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001939 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001941
1942 INIT_LIST_HEAD(&cache->unknown);
1943 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944}
1945
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001946struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1947 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948{
Johan Hedberg30883512012-01-04 14:16:21 +02001949 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 struct inquiry_entry *e;
1951
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001952 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953
Johan Hedberg561aafb2012-01-04 13:31:59 +02001954 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001956 return e;
1957 }
1958
1959 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960}
1961
Johan Hedberg561aafb2012-01-04 13:31:59 +02001962struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001963 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001964{
Johan Hedberg30883512012-01-04 14:16:21 +02001965 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001966 struct inquiry_entry *e;
1967
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001968 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001969
1970 list_for_each_entry(e, &cache->unknown, list) {
1971 if (!bacmp(&e->data.bdaddr, bdaddr))
1972 return e;
1973 }
1974
1975 return NULL;
1976}
1977
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001978struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001979 bdaddr_t *bdaddr,
1980 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001981{
1982 struct discovery_state *cache = &hdev->discovery;
1983 struct inquiry_entry *e;
1984
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001985 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986
1987 list_for_each_entry(e, &cache->resolve, list) {
1988 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1989 return e;
1990 if (!bacmp(&e->data.bdaddr, bdaddr))
1991 return e;
1992 }
1993
1994 return NULL;
1995}
1996
Johan Hedberga3d4e202012-01-09 00:53:02 +02001997void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001998 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001999{
2000 struct discovery_state *cache = &hdev->discovery;
2001 struct list_head *pos = &cache->resolve;
2002 struct inquiry_entry *p;
2003
2004 list_del(&ie->list);
2005
2006 list_for_each_entry(p, &cache->resolve, list) {
2007 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002008 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002009 break;
2010 pos = &p->list;
2011 }
2012
2013 list_add(&ie->list, pos);
2014}
2015
/* Insert or refresh the inquiry cache entry for the device described
 * by @data, and compute the MGMT_DEV_FOUND_* flags to report with the
 * Device Found event: LEGACY_PAIRING when either the new or the cached
 * data shows SSP disabled, CONFIRM_NAME when the name still needs to
 * be confirmed (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts a name-needed entry on the
		 * resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* A newly learned name promotes the entry off the unknown list
	 * (entries with a pending request are left for the resolver).
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2077
2078static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2079{
Johan Hedberg30883512012-01-04 14:16:21 +02002080 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 struct inquiry_info *info = (struct inquiry_info *) buf;
2082 struct inquiry_entry *e;
2083 int copied = 0;
2084
Johan Hedberg561aafb2012-01-04 13:31:59 +02002085 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002087
2088 if (copied >= num)
2089 break;
2090
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 bacpy(&info->bdaddr, &data->bdaddr);
2092 info->pscan_rep_mode = data->pscan_rep_mode;
2093 info->pscan_period_mode = data->pscan_period_mode;
2094 info->pscan_mode = data->pscan_mode;
2095 memcpy(info->dev_class, data->dev_class, 3);
2096 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002099 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 }
2101
2102 BT_DBG("cache %p, copied %d", cache, copied);
2103 return copied;
2104}
2105
Johan Hedberg42c6b122013-03-05 20:37:49 +02002106static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107{
2108 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002109 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 struct hci_cp_inquiry cp;
2111
2112 BT_DBG("%s", hdev->name);
2113
2114 if (test_bit(HCI_INQUIRY, &hdev->flags))
2115 return;
2116
2117 /* Start Inquiry */
2118 memcpy(&cp.lap, &ir->lap, 3);
2119 cp.length = ir->length;
2120 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002121 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122}
2123
/* Action callback for wait_on_bit(): sleep until woken up and report
 * whether the sleep was interrupted by a signal (non-zero aborts the
 * wait with -EINTR in the caller).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130int hci_inquiry(void __user *arg)
2131{
2132 __u8 __user *ptr = arg;
2133 struct hci_inquiry_req ir;
2134 struct hci_dev *hdev;
2135 int err = 0, do_inquiry = 0, max_rsp;
2136 long timeo;
2137 __u8 *buf;
2138
2139 if (copy_from_user(&ir, ptr, sizeof(ir)))
2140 return -EFAULT;
2141
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002142 hdev = hci_dev_get(ir.dev_id);
2143 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 return -ENODEV;
2145
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002146 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2147 err = -EBUSY;
2148 goto done;
2149 }
2150
Marcel Holtmann4a964402014-07-02 19:10:33 +02002151 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002152 err = -EOPNOTSUPP;
2153 goto done;
2154 }
2155
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002156 if (hdev->dev_type != HCI_BREDR) {
2157 err = -EOPNOTSUPP;
2158 goto done;
2159 }
2160
Johan Hedberg56f87902013-10-02 13:43:13 +03002161 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2162 err = -EOPNOTSUPP;
2163 goto done;
2164 }
2165
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002166 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002167 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002168 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002169 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 do_inquiry = 1;
2171 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002172 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
Marcel Holtmann04837f62006-07-03 10:02:33 +02002174 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002175
2176 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002177 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2178 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002179 if (err < 0)
2180 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002181
2182 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2183 * cleared). If it is interrupted by a signal, return -EINTR.
2184 */
2185 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2186 TASK_INTERRUPTIBLE))
2187 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002188 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002190 /* for unlimited number of responses we will use buffer with
2191 * 255 entries
2192 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2194
2195 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2196 * copy it to the user space.
2197 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002198 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002199 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 err = -ENOMEM;
2201 goto done;
2202 }
2203
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002204 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002206 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207
2208 BT_DBG("num_rsp %d", ir.num_rsp);
2209
2210 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2211 ptr += sizeof(ir);
2212 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002213 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002215 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216 err = -EFAULT;
2217
2218 kfree(buf);
2219
2220done:
2221 hci_dev_put(hdev);
2222 return err;
2223}
2224
/* Core power-on path for an HCI device, called with no locks held.
 * Performs pre-flight checks (unregistering, rfkill, address
 * availability), opens the transport, runs the driver setup and the
 * appropriate init stage, and either marks the device up or tears the
 * transport back down on failure. Returns 0 or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport driver */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Allow exactly one outstanding command during init */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	/* Full init only for configured controllers not claimed by a
	 * user channel.
	 */
	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Only notify mgmt for regular, fully configured
		 * BR/EDR controllers outside of the setup phase.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2368
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002369/* ---- HCI ioctl helpers ---- */
2370
2371int hci_dev_open(__u16 dev)
2372{
2373 struct hci_dev *hdev;
2374 int err;
2375
2376 hdev = hci_dev_get(dev);
2377 if (!hdev)
2378 return -ENODEV;
2379
Marcel Holtmann4a964402014-07-02 19:10:33 +02002380 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002381 * up as user channel. Trying to bring them up as normal devices
2382 * will result into a failure. Only user channel operation is
2383 * possible.
2384 *
2385 * When this function is called for a user channel, the flag
2386 * HCI_USER_CHANNEL will be set first before attempting to
2387 * open the device.
2388 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002390 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2391 err = -EOPNOTSUPP;
2392 goto done;
2393 }
2394
Johan Hedberge1d08f42013-10-01 22:44:50 +03002395 /* We need to ensure that no other power on/off work is pending
2396 * before proceeding to call hci_dev_do_open. This is
2397 * particularly important if the setup procedure has not yet
2398 * completed.
2399 */
2400 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2401 cancel_delayed_work(&hdev->power_off);
2402
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002403 /* After this call it is guaranteed that the setup procedure
2404 * has finished. This means that error conditions like RFKILL
2405 * or no valid public or static random address apply.
2406 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002407 flush_workqueue(hdev->req_workqueue);
2408
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002409 err = hci_dev_do_open(hdev);
2410
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002411done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002412 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002413 return err;
2414}
2415
Johan Hedbergd7347f32014-07-04 12:37:23 +03002416/* This function requires the caller holds hdev->lock */
2417static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2418{
2419 struct hci_conn_params *p;
2420
2421 list_for_each_entry(p, &hdev->le_conn_params, list)
2422 list_del_init(&p->action);
2423
2424 BT_DBG("All LE pending actions cleared");
2425}
2426
/* Power down and tear down an HCI device. The shutdown order below is
 * deliberate: pending work is cancelled first, queues are drained, the
 * controller is optionally reset, and only then is the transport closed
 * and the persistent state cleared. Returns 0 unconditionally.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A delayed auto-power-off must not fire while we are closing */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and drop the related flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* RPA rotation only runs when mgmt is in use */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Drop cached inquiry results, all connections and pending
	 * LE actions under the device lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Only send HCI_Reset when the controller is fully configured,
	 * not auto-powering off, and the quirk asks for it.
	 */
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() tolerates NULL, so no check is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags, keeping only HCI_RAW across the down transition */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* If this close was not triggered by auto-off, tell mgmt that
	 * a BR/EDR controller was powered down.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Wipe identity data that must not survive a power cycle */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2529
2530int hci_dev_close(__u16 dev)
2531{
2532 struct hci_dev *hdev;
2533 int err;
2534
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002535 hdev = hci_dev_get(dev);
2536 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002538
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002539 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2540 err = -EBUSY;
2541 goto done;
2542 }
2543
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002544 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2545 cancel_delayed_work(&hdev->power_off);
2546
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002548
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002549done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 hci_dev_put(hdev);
2551 return err;
2552}
2553
/* Reset a running HCI device: drop queued traffic and cached state,
 * then issue a synchronous HCI_Reset request. Fails with -ENETDOWN if
 * the device is not up, -EBUSY for user-channel devices and
 * -EOPNOTSUPP for unconfigured ones.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request processing on this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush inquiry cache and connections under the device lock */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart flow control accounting from a clean slate */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2602
2603int hci_dev_reset_stat(__u16 dev)
2604{
2605 struct hci_dev *hdev;
2606 int ret = 0;
2607
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002608 hdev = hci_dev_get(dev);
2609 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 return -ENODEV;
2611
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002612 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2613 ret = -EBUSY;
2614 goto done;
2615 }
2616
Marcel Holtmann4a964402014-07-02 19:10:33 +02002617 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002618 ret = -EOPNOTSUPP;
2619 goto done;
2620 }
2621
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2623
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002624done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626 return ret;
2627}
2628
/* Handle the legacy HCISET* ioctls. Copies a struct hci_dev_req from
 * userspace, validates the target device and dispatches on the ioctl
 * number. Only configured BR/EDR devices that are not claimed by a
 * user channel may be tweaked this way.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits may be set from userspace */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high 16 bits, packet count in
		 * the low 16 bits.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2724
/* HCIGETDEVLIST ioctl backend: copy up to dev_num (id, flags) pairs for
 * the registered HCI devices back to userspace. The requested count is
 * capped so the temporary buffer stays within two pages.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Touching the device from the legacy API aborts any
		 * pending auto-power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2771
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * one device and copy it to userspace. For LE-only controllers the ACL
 * fields are reused to report the LE buffer parameters.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy API access cancels a pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus in the low nibble and device type in bits 4-5 */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2820
2821/* ---- Interface to HCI drivers ---- */
2822
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002823static int hci_rfkill_set_block(void *data, bool blocked)
2824{
2825 struct hci_dev *hdev = data;
2826
2827 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2828
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002829 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2830 return -EBUSY;
2831
Johan Hedberg5e130362013-09-13 08:58:17 +03002832 if (blocked) {
2833 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002834 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2835 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002836 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002837 } else {
2838 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002839 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002840
2841 return 0;
2842}
2843
/* rfkill operations registered for every HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2847
/* Workqueue handler that powers a device on (mgmt power-on path).
 * Opens the device and then re-validates error conditions that were
 * deliberately ignored during setup, closing the device again if any
 * still apply.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
2906
2907static void hci_power_off(struct work_struct *work)
2908{
Johan Hedberg32435532011-11-07 22:16:04 +02002909 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002910 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002911
2912 BT_DBG("%s", hdev->name);
2913
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002914 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002915}
2916
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002917static void hci_discov_off(struct work_struct *work)
2918{
2919 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002920
2921 hdev = container_of(work, struct hci_dev, discov_off.work);
2922
2923 BT_DBG("%s", hdev->name);
2924
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002925 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002926}
2927
Johan Hedberg35f74982014-02-18 17:14:32 +02002928void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002929{
Johan Hedberg48210022013-01-27 00:31:28 +02002930 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002931
Johan Hedberg48210022013-01-27 00:31:28 +02002932 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2933 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002934 kfree(uuid);
2935 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002936}
2937
Johan Hedberg35f74982014-02-18 17:14:32 +02002938void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002939{
2940 struct list_head *p, *n;
2941
2942 list_for_each_safe(p, n, &hdev->link_keys) {
2943 struct link_key *key;
2944
2945 key = list_entry(p, struct link_key, list);
2946
2947 list_del(p);
2948 kfree(key);
2949 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002950}
2951
Johan Hedberg35f74982014-02-18 17:14:32 +02002952void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002953{
2954 struct smp_ltk *k, *tmp;
2955
2956 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2957 list_del(&k->list);
2958 kfree(k);
2959 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002960}
2961
Johan Hedberg970c4e42014-02-18 10:19:33 +02002962void hci_smp_irks_clear(struct hci_dev *hdev)
2963{
2964 struct smp_irk *k, *tmp;
2965
2966 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2967 list_del(&k->list);
2968 kfree(k);
2969 }
2970}
2971
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002972struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2973{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002974 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002975
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002976 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002977 if (bacmp(bdaddr, &k->bdaddr) == 0)
2978 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002979
2980 return NULL;
2981}
2982
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type, the previous key type and the
 * authentication requirements of both sides of the connection.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Both local and remote side requested some form of bonding
	 * (auth requirement values above 0x01 imply bonding).
	 */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
3018
Johan Hedberg98a0b842014-01-30 19:40:00 -08003019static bool ltk_type_master(u8 type)
3020{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003021 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003022}
3023
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003024struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003025 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003026{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003027 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003028
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003029 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003030 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003031 continue;
3032
Johan Hedberg98a0b842014-01-30 19:40:00 -08003033 if (ltk_type_master(k->type) != master)
3034 continue;
3035
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003036 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003037 }
3038
3039 return NULL;
3040}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003041
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003042struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003043 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003044{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003045 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003046
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003047 list_for_each_entry(k, &hdev->long_term_keys, list)
3048 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003049 bacmp(bdaddr, &k->bdaddr) == 0 &&
3050 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003051 return k;
3052
3053 return NULL;
3054}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003055
/* Find the IRK that resolves the given resolvable private address.
 *
 * First pass: cheap lookup against the RPA cached in each IRK entry.
 * Second pass: cryptographic match via smp_irk_matches(); on success
 * the resolved RPA is cached in the entry so the next lookup hits the
 * fast path. Returns NULL when no IRK resolves the address.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
3074
3075struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3076 u8 addr_type)
3077{
3078 struct smp_irk *irk;
3079
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003080 /* Identity Address must be public or static random */
3081 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3082 return NULL;
3083
Johan Hedberg970c4e42014-02-18 10:19:33 +02003084 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3085 if (addr_type == irk->addr_type &&
3086 bacmp(bdaddr, &irk->bdaddr) == 0)
3087 return irk;
3088 }
3089
3090 return NULL;
3091}
3092
/* Store (or update) a BR/EDR link key for the given remote address.
 * If @persistent is non-NULL it is set to whether the key should be
 * kept across power cycles. Returns the stored key, or NULL when a new
 * entry could not be allocated.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Update the existing entry in place */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3139
/* Store (or update) an SMP long term key for the given address and
 * role. An existing entry for the same address/type/role is
 * overwritten. Returns the stored key, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
3168
/* Add or update an Identity Resolving Key for the given identity address.
 *
 * If no IRK exists yet for bdaddr/addr_type a new entry is allocated and
 * added to hdev->identity_resolving_keys. The key value and the last known
 * resolvable private address (rpa) are always refreshed. Returns the
 * stored entry, or NULL on allocation failure.
 */
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	/* Always update key material and the associated RPA. */
	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}
3191
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003192int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3193{
3194 struct link_key *key;
3195
3196 key = hci_find_link_key(hdev, bdaddr);
3197 if (!key)
3198 return -ENOENT;
3199
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003200 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003201
3202 list_del(&key->list);
3203 kfree(key);
3204
3205 return 0;
3206}
3207
/* Remove all Long Term Keys matching bdaddr/bdaddr_type.
 *
 * Both master and slave role keys for the address are removed.
 * Returns 0 if at least one key was removed, -ENOENT otherwise.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}
3226
Johan Hedberga7ec7332014-02-18 17:14:35 +02003227void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3228{
3229 struct smp_irk *k, *tmp;
3230
Johan Hedberg668b7b12014-02-21 16:03:31 +02003231 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003232 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3233 continue;
3234
3235 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3236
3237 list_del(&k->list);
3238 kfree(k);
3239 }
3240}
3241
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	/* Log the opcode of the command that timed out, if one is
	 * still outstanding.
	 */
	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Allow one more command to be sent and kick the TX path so the
	 * command queue is not stalled forever.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3260
Szymon Janc2763eda2011-03-22 13:12:22 +01003261struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003262 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003263{
3264 struct oob_data *data;
3265
3266 list_for_each_entry(data, &hdev->remote_oob_data, list)
3267 if (bacmp(bdaddr, &data->bdaddr) == 0)
3268 return data;
3269
3270 return NULL;
3271}
3272
3273int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3274{
3275 struct oob_data *data;
3276
3277 data = hci_find_remote_oob_data(hdev, bdaddr);
3278 if (!data)
3279 return -ENOENT;
3280
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003281 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003282
3283 list_del(&data->list);
3284 kfree(data);
3285
3286 return 0;
3287}
3288
Johan Hedberg35f74982014-02-18 17:14:32 +02003289void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003290{
3291 struct oob_data *data, *n;
3292
3293 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3294 list_del(&data->list);
3295 kfree(data);
3296 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003297}
3298
/* Store legacy (P-192 only) remote OOB pairing data for bdaddr.
 *
 * An existing entry for the address is reused, otherwise a new one is
 * allocated. The P-256 fields are explicitly zeroed since this variant
 * carries no Secure Connections data. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* No P-256 values in the legacy variant; clear them so stale
	 * data from a previous pairing cannot be used.
	 */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3324
/* Store extended remote OOB pairing data (both P-192 and P-256 values)
 * for bdaddr.
 *
 * An existing entry for the address is reused, otherwise a new one is
 * allocated. Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3351
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003352struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3353 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003354{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003355 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003356
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003357 list_for_each_entry(b, &hdev->blacklist, list) {
3358 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003359 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003360 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003361
3362 return NULL;
3363}
3364
Marcel Holtmannc9507492014-02-27 19:35:54 -08003365static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003366{
3367 struct list_head *p, *n;
3368
3369 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003370 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003371
3372 list_del(p);
3373 kfree(b);
3374 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003375}
3376
/* Add bdaddr/type to the controller's blacklist.
 *
 * Returns 0 on success, -EBADF for BDADDR_ANY (not a valid target),
 * -EEXIST if the address is already blacklisted, or -ENOMEM.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* The wildcard address is reserved for "clear" semantics in
	 * hci_blacklist_del() and may not be blacklisted itself.
	 */
	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return 0;
}
3398
/* Remove bdaddr/type from the controller's blacklist.
 *
 * Passing BDADDR_ANY clears the whole blacklist. Returns 0 on success
 * or -ENOENT if the address was not blacklisted.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard address means "remove everything". */
	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
3417
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003418struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3419 bdaddr_t *bdaddr, u8 type)
3420{
3421 struct bdaddr_list *b;
3422
3423 list_for_each_entry(b, &hdev->le_white_list, list) {
3424 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3425 return b;
3426 }
3427
3428 return NULL;
3429}
3430
3431void hci_white_list_clear(struct hci_dev *hdev)
3432{
3433 struct list_head *p, *n;
3434
3435 list_for_each_safe(p, n, &hdev->le_white_list) {
3436 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3437
3438 list_del(p);
3439 kfree(b);
3440 }
3441}
3442
/* Add bdaddr/type to the LE white list.
 *
 * Returns 0 on success, -EBADF for the wildcard BDADDR_ANY, or -ENOMEM.
 * Note that, unlike hci_blacklist_add(), duplicates are not rejected
 * here; callers are expected to check with hci_white_list_lookup().
 */
int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}
3461
3462int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3463{
3464 struct bdaddr_list *entry;
3465
3466 if (!bacmp(bdaddr, BDADDR_ANY))
3467 return -EBADF;
3468
3469 entry = hci_white_list_lookup(hdev, bdaddr, type);
3470 if (!entry)
3471 return -ENOENT;
3472
3473 list_del(&entry->list);
3474 kfree(entry);
3475
3476 return 0;
3477}
3478
Andre Guedes15819a72014-02-03 13:56:18 -03003479/* This function requires the caller holds hdev->lock */
3480struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3481 bdaddr_t *addr, u8 addr_type)
3482{
3483 struct hci_conn_params *params;
3484
Johan Hedberg738f6182014-07-03 19:33:51 +03003485 /* The conn params list only contains identity addresses */
3486 if (!hci_is_identity_address(addr, addr_type))
3487 return NULL;
3488
Andre Guedes15819a72014-02-03 13:56:18 -03003489 list_for_each_entry(params, &hdev->le_conn_params, list) {
3490 if (bacmp(&params->addr, addr) == 0 &&
3491 params->addr_type == addr_type) {
3492 return params;
3493 }
3494 }
3495
3496 return NULL;
3497}
3498
Andre Guedescef952c2014-02-26 20:21:49 -03003499static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3500{
3501 struct hci_conn *conn;
3502
3503 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3504 if (!conn)
3505 return false;
3506
3507 if (conn->dst_type != type)
3508 return false;
3509
3510 if (conn->state != BT_CONNECTED)
3511 return false;
3512
3513 return true;
3514}
3515
/* This function requires the caller holds hdev->lock.
 *
 * Look up a connection-parameters entry on one of the pending action
 * lists (hdev->pend_le_conns or hdev->pend_le_reports); the entries are
 * linked through their 'action' member, not 'list'. Returns the match
 * or NULL.
 */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}
3534
/* This function requires the caller holds hdev->lock.
 *
 * Find or create the connection-parameters entry for addr/addr_type.
 * New entries are initialised from the controller-wide LE defaults and
 * start out with auto-connect disabled. Returns NULL if the address is
 * not an identity address or on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry for this address, if any. */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not yet on any pending-action list. */
	INIT_LIST_HEAD(&params->action);

	/* Seed per-connection values from the controller defaults. */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3570
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for addr/addr_type, creating the
 * connection-parameters entry if needed. Moves the entry between the
 * pending-action lists (pend_le_conns / pend_le_reports) as required
 * and re-evaluates background scanning. Returns 0 on success or -EIO
 * if the entry could not be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from whichever action list the entry is currently on
	 * (list_del_init so it can be safely re-added below).
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if one is not already
		 * established.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3610
/* This function requires the caller holds hdev->lock.
 *
 * Remove the connection-parameters entry for addr/addr_type, taking it
 * off any pending-action list first, then re-evaluate background
 * scanning. A missing entry is silently ignored.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3628
3629/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003630void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3631{
3632 struct hci_conn_params *params, *tmp;
3633
3634 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3635 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3636 continue;
3637 list_del(&params->list);
3638 kfree(params);
3639 }
3640
3641 BT_DBG("All LE disabled connection parameters were removed");
3642}
3643
/* This function requires the caller holds hdev->lock.
 *
 * Drop every connection-parameters entry, detaching each from its
 * pending-action list first, then re-evaluate background scanning.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->action);
		list_del(&params->list);
		kfree(params);
	}

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
3659
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003660static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003661{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003662 if (status) {
3663 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003664
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003665 hci_dev_lock(hdev);
3666 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3667 hci_dev_unlock(hdev);
3668 return;
3669 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003670}
3671
/* Completion callback for the LE-scan-disable request.
 *
 * For a pure LE discovery the procedure is now finished, so discovery
 * is moved to the stopped state. For interleaved discovery a classic
 * inquiry (GIAC) is started next.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: done. */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Follow up with a BR/EDR inquiry. */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3714
/* Delayed work that turns off LE scanning when the scan duration has
 * elapsed; completion is handled in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3732
/* Queue an HCI command to set the controller's LE random address,
 * unless doing so right now would leave an in-flight operation with an
 * undefined initiator address.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3755
/* Decide which own-address type to use for an LE operation and, when a
 * random address is chosen, queue the command to program it.
 *
 * Selection order: resolvable private address (when privacy is
 * enabled), unresolvable private address (when privacy is required but
 * not enabled), static random address (forced or no public address),
 * and finally the public address. Returns 0 on success or a negative
 * error from RPA generation; *own_addr_type is set accordingly.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Keep the current RPA if it has not expired and is
		 * still the programmed random address.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule the next RPA rotation. */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3825
Johan Hedberga1f4c312014-02-27 14:05:41 +02003826/* Copy the Identity Address of the controller.
3827 *
3828 * If the controller has a public BD_ADDR, then by default use that one.
3829 * If this is a LE only controller without a public address, default to
3830 * the static random address.
3831 *
3832 * For debugging purposes it is possible to force controllers with a
3833 * public address to use the static random address instead.
3834 */
3835void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3836 u8 *bdaddr_type)
3837{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003838 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003839 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3840 bacpy(bdaddr, &hdev->static_addr);
3841 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3842 } else {
3843 bacpy(bdaddr, &hdev->bdaddr);
3844 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3845 }
3846}
3847
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline capabilities and defaults before the driver and the
	 * controller's own capabilities refine them.
	 */
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE defaults (scan window/interval and connection parameters). */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending actions, ...). */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items for the RX/TX/command paths and power handling. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3921
3922/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop the device reference; the hci_dev itself is freed by the
	 * device release callback once the last reference is gone, so
	 * callers must not touch hdev after this returns.
	 */
	put_device(&hdev->dev);
}
3928EXPORT_SYMBOL(hci_free_dev);
3929
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930/* Register HCI device */
3931int hci_register_dev(struct hci_dev *hdev)
3932{
David Herrmannb1b813d2012-04-22 14:39:58 +02003933 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934
David Herrmann010666a2012-01-07 15:47:07 +01003935 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 return -EINVAL;
3937
Mat Martineau08add512011-11-02 16:18:36 -07003938 /* Do not allow HCI_AMP devices to register at index 0,
3939 * so the index can be used as the AMP controller ID.
3940 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003941 switch (hdev->dev_type) {
3942 case HCI_BREDR:
3943 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3944 break;
3945 case HCI_AMP:
3946 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3947 break;
3948 default:
3949 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003951
Sasha Levin3df92b32012-05-27 22:36:56 +02003952 if (id < 0)
3953 return id;
3954
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955 sprintf(hdev->name, "hci%d", id);
3956 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003957
3958 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3959
Kees Cookd8537542013-07-03 15:04:57 -07003960 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3961 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003962 if (!hdev->workqueue) {
3963 error = -ENOMEM;
3964 goto err;
3965 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003966
Kees Cookd8537542013-07-03 15:04:57 -07003967 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3968 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003969 if (!hdev->req_workqueue) {
3970 destroy_workqueue(hdev->workqueue);
3971 error = -ENOMEM;
3972 goto err;
3973 }
3974
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003975 if (!IS_ERR_OR_NULL(bt_debugfs))
3976 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3977
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003978 dev_set_name(&hdev->dev, "%s", hdev->name);
3979
Johan Hedberg99780a72014-02-18 10:40:07 +02003980 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3981 CRYPTO_ALG_ASYNC);
3982 if (IS_ERR(hdev->tfm_aes)) {
3983 BT_ERR("Unable to create crypto context");
3984 error = PTR_ERR(hdev->tfm_aes);
3985 hdev->tfm_aes = NULL;
3986 goto err_wqueue;
3987 }
3988
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003989 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003990 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003991 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003993 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003994 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3995 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003996 if (hdev->rfkill) {
3997 if (rfkill_register(hdev->rfkill) < 0) {
3998 rfkill_destroy(hdev->rfkill);
3999 hdev->rfkill = NULL;
4000 }
4001 }
4002
Johan Hedberg5e130362013-09-13 08:58:17 +03004003 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4004 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4005
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004006 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004007 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004008
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004009 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004010 /* Assume BR/EDR support until proven otherwise (such as
4011 * through reading supported features during init.
4012 */
4013 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4014 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004015
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004016 write_lock(&hci_dev_list_lock);
4017 list_add(&hdev->list, &hci_dev_list);
4018 write_unlock(&hci_dev_list_lock);
4019
Marcel Holtmann4a964402014-07-02 19:10:33 +02004020 /* Devices that are marked for raw-only usage are unconfigured
4021 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004022 */
4023 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004024 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004025
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004027 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028
Johan Hedberg19202572013-01-14 22:33:51 +02004029 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004030
Linus Torvalds1da177e2005-04-16 15:20:36 -07004031 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004032
Johan Hedberg99780a72014-02-18 10:40:07 +02004033err_tfm:
4034 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004035err_wqueue:
4036 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004037 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004038err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004039 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004040
David Herrmann33ca9542011-10-08 14:58:49 +02004041 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042}
4043EXPORT_SYMBOL(hci_register_dev);
4044
4045/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004046void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047{
Sasha Levin3df92b32012-05-27 22:36:56 +02004048 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004049
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004050 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
Johan Hovold94324962012-03-15 14:48:41 +01004052 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4053
Sasha Levin3df92b32012-05-27 22:36:56 +02004054 id = hdev->id;
4055
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004056 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004058 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
4060 hci_dev_do_close(hdev);
4061
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304062 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004063 kfree_skb(hdev->reassembly[i]);
4064
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004065 cancel_work_sync(&hdev->power_on);
4066
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004067 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004068 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4069 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004070 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004071 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004072 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004073 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004074
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004075 /* mgmt_index_removed should take care of emptying the
4076 * pending list */
4077 BUG_ON(!list_empty(&hdev->mgmt_pending));
4078
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 hci_notify(hdev, HCI_DEV_UNREG);
4080
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004081 if (hdev->rfkill) {
4082 rfkill_unregister(hdev->rfkill);
4083 rfkill_destroy(hdev->rfkill);
4084 }
4085
Johan Hedberg99780a72014-02-18 10:40:07 +02004086 if (hdev->tfm_aes)
4087 crypto_free_blkcipher(hdev->tfm_aes);
4088
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004089 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004090
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004091 debugfs_remove_recursive(hdev->debugfs);
4092
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004093 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004094 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004095
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004096 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004097 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004098 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004099 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004100 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004101 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004102 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004103 hci_white_list_clear(hdev);
Johan Hedberg373110c2014-07-02 17:37:25 +03004104 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004105 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004106
David Herrmanndc946bd2012-01-07 15:47:24 +01004107 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004108
4109 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110}
4111EXPORT_SYMBOL(hci_unregister_dev);
4112
4113/* Suspend HCI device */
4114int hci_suspend_dev(struct hci_dev *hdev)
4115{
4116 hci_notify(hdev, HCI_DEV_SUSPEND);
4117 return 0;
4118}
4119EXPORT_SYMBOL(hci_suspend_dev);
4120
4121/* Resume HCI device */
4122int hci_resume_dev(struct hci_dev *hdev)
4123{
4124 hci_notify(hdev, HCI_DEV_RESUME);
4125 return 0;
4126}
4127EXPORT_SYMBOL(hci_resume_dev);
4128
Marcel Holtmann76bca882009-11-18 00:40:39 +01004129/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004130int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004131{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004132 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004133 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004134 kfree_skb(skb);
4135 return -ENXIO;
4136 }
4137
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004138 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004139 bt_cb(skb)->incoming = 1;
4140
4141 /* Time stamp */
4142 __net_timestamp(skb);
4143
Marcel Holtmann76bca882009-11-18 00:40:39 +01004144 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004145 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004146
Marcel Holtmann76bca882009-11-18 00:40:39 +01004147 return 0;
4148}
4149EXPORT_SYMBOL(hci_recv_frame);
4150
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304151static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004152 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304153{
4154 int len = 0;
4155 int hlen = 0;
4156 int remain = count;
4157 struct sk_buff *skb;
4158 struct bt_skb_cb *scb;
4159
4160 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004161 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304162 return -EILSEQ;
4163
4164 skb = hdev->reassembly[index];
4165
4166 if (!skb) {
4167 switch (type) {
4168 case HCI_ACLDATA_PKT:
4169 len = HCI_MAX_FRAME_SIZE;
4170 hlen = HCI_ACL_HDR_SIZE;
4171 break;
4172 case HCI_EVENT_PKT:
4173 len = HCI_MAX_EVENT_SIZE;
4174 hlen = HCI_EVENT_HDR_SIZE;
4175 break;
4176 case HCI_SCODATA_PKT:
4177 len = HCI_MAX_SCO_SIZE;
4178 hlen = HCI_SCO_HDR_SIZE;
4179 break;
4180 }
4181
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004182 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304183 if (!skb)
4184 return -ENOMEM;
4185
4186 scb = (void *) skb->cb;
4187 scb->expect = hlen;
4188 scb->pkt_type = type;
4189
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304190 hdev->reassembly[index] = skb;
4191 }
4192
4193 while (count) {
4194 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004195 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304196
4197 memcpy(skb_put(skb, len), data, len);
4198
4199 count -= len;
4200 data += len;
4201 scb->expect -= len;
4202 remain = count;
4203
4204 switch (type) {
4205 case HCI_EVENT_PKT:
4206 if (skb->len == HCI_EVENT_HDR_SIZE) {
4207 struct hci_event_hdr *h = hci_event_hdr(skb);
4208 scb->expect = h->plen;
4209
4210 if (skb_tailroom(skb) < scb->expect) {
4211 kfree_skb(skb);
4212 hdev->reassembly[index] = NULL;
4213 return -ENOMEM;
4214 }
4215 }
4216 break;
4217
4218 case HCI_ACLDATA_PKT:
4219 if (skb->len == HCI_ACL_HDR_SIZE) {
4220 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4221 scb->expect = __le16_to_cpu(h->dlen);
4222
4223 if (skb_tailroom(skb) < scb->expect) {
4224 kfree_skb(skb);
4225 hdev->reassembly[index] = NULL;
4226 return -ENOMEM;
4227 }
4228 }
4229 break;
4230
4231 case HCI_SCODATA_PKT:
4232 if (skb->len == HCI_SCO_HDR_SIZE) {
4233 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4234 scb->expect = h->dlen;
4235
4236 if (skb_tailroom(skb) < scb->expect) {
4237 kfree_skb(skb);
4238 hdev->reassembly[index] = NULL;
4239 return -ENOMEM;
4240 }
4241 }
4242 break;
4243 }
4244
4245 if (scb->expect == 0) {
4246 /* Complete frame */
4247
4248 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004249 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304250
4251 hdev->reassembly[index] = NULL;
4252 return remain;
4253 }
4254 }
4255
4256 return remain;
4257}
4258
Marcel Holtmannef222012007-07-11 06:42:04 +02004259int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4260{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304261 int rem = 0;
4262
Marcel Holtmannef222012007-07-11 06:42:04 +02004263 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4264 return -EILSEQ;
4265
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004266 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004267 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304268 if (rem < 0)
4269 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004270
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304271 data += (count - rem);
4272 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004273 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004274
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304275 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004276}
4277EXPORT_SYMBOL(hci_recv_fragment);
4278
Suraj Sumangala99811512010-07-14 13:02:19 +05304279#define STREAM_REASSEMBLY 0
4280
4281int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4282{
4283 int type;
4284 int rem = 0;
4285
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004286 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304287 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4288
4289 if (!skb) {
4290 struct { char type; } *pkt;
4291
4292 /* Start of the frame */
4293 pkt = data;
4294 type = pkt->type;
4295
4296 data++;
4297 count--;
4298 } else
4299 type = bt_cb(skb)->pkt_type;
4300
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004301 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004302 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304303 if (rem < 0)
4304 return rem;
4305
4306 data += (count - rem);
4307 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004308 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304309
4310 return rem;
4311}
4312EXPORT_SYMBOL(hci_recv_stream_fragment);
4313
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314/* ---- Interface to upper protocols ---- */
4315
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316int hci_register_cb(struct hci_cb *cb)
4317{
4318 BT_DBG("%p name %s", cb, cb->name);
4319
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004320 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004321 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004322 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323
4324 return 0;
4325}
4326EXPORT_SYMBOL(hci_register_cb);
4327
int hci_unregister_cb(struct hci_cb *cb)
{
	/* Remove a previously registered upper-protocol callback set. */
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
4338EXPORT_SYMBOL(hci_unregister_cb);
4339
Marcel Holtmann51086992013-10-10 14:54:19 -07004340static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004341{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004342 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004344 /* Time stamp */
4345 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004347 /* Send copy to monitor */
4348 hci_send_to_monitor(hdev, skb);
4349
4350 if (atomic_read(&hdev->promisc)) {
4351 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004352 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353 }
4354
4355 /* Get rid of skb owner, prior to sending to the driver. */
4356 skb_orphan(skb);
4357
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004358 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004359 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360}
4361
Johan Hedberg3119ae92013-03-05 20:37:44 +02004362void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4363{
4364 skb_queue_head_init(&req->cmd_q);
4365 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004366 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004367}
4368
/* Submit a built-up request: splice its queued commands onto the
 * device command queue and schedule the command work. The complete
 * callback is attached to the last command of the request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* cmd_q is also touched from interrupt context, hence irqsave */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4400
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004401static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004402 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403{
4404 int len = HCI_COMMAND_HDR_SIZE + plen;
4405 struct hci_command_hdr *hdr;
4406 struct sk_buff *skb;
4407
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004409 if (!skb)
4410 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411
4412 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004413 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 hdr->plen = plen;
4415
4416 if (plen)
4417 memcpy(skb_put(skb, plen), param, plen);
4418
4419 BT_DBG("skb len %d", skb->len);
4420
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004421 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004422
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004423 return skb;
4424}
4425
4426/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004427int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4428 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004429{
4430 struct sk_buff *skb;
4431
4432 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4433
4434 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4435 if (!skb) {
4436 BT_ERR("%s no memory for command", hdev->name);
4437 return -ENOMEM;
4438 }
4439
Johan Hedberg11714b32013-03-05 20:37:47 +02004440 /* Stand-alone HCI commands must be flaged as
4441 * single-command requests.
4442 */
4443 bt_cb(skb)->req.start = true;
4444
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004446 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447
4448 return 0;
4449}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450
Johan Hedberg71c76a12013-03-05 20:37:46 +02004451/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004452void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4453 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004454{
4455 struct hci_dev *hdev = req->hdev;
4456 struct sk_buff *skb;
4457
4458 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4459
Andre Guedes34739c12013-03-08 11:20:18 -03004460 /* If an error occured during request building, there is no point in
4461 * queueing the HCI command. We can simply return.
4462 */
4463 if (req->err)
4464 return;
4465
Johan Hedberg71c76a12013-03-05 20:37:46 +02004466 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4467 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004468 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4469 hdev->name, opcode);
4470 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004471 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004472 }
4473
4474 if (skb_queue_empty(&req->cmd_q))
4475 bt_cb(skb)->req.start = true;
4476
Johan Hedberg02350a72013-04-03 21:50:29 +03004477 bt_cb(skb)->req.event = event;
4478
Johan Hedberg71c76a12013-03-05 20:37:46 +02004479 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004480}
4481
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004482void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4483 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004484{
4485 hci_req_add_ev(req, opcode, plen, param, 0);
4486}
4487
Linus Torvalds1da177e2005-04-16 15:20:36 -07004488/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004489void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004490{
4491 struct hci_command_hdr *hdr;
4492
4493 if (!hdev->sent_cmd)
4494 return NULL;
4495
4496 hdr = (void *) hdev->sent_cmd->data;
4497
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004498 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 return NULL;
4500
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004501 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502
4503 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4504}
4505
4506/* Send ACL data */
/* Prepend an ACL data header (handle plus packet-boundary/broadcast
 * flags, and the payload length) to an outgoing skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and flags share one little-endian 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
4518
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004519static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004520 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004521{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004522 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523 struct hci_dev *hdev = conn->hdev;
4524 struct sk_buff *list;
4525
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004526 skb->len = skb_headlen(skb);
4527 skb->data_len = 0;
4528
4529 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004530
4531 switch (hdev->dev_type) {
4532 case HCI_BREDR:
4533 hci_add_acl_hdr(skb, conn->handle, flags);
4534 break;
4535 case HCI_AMP:
4536 hci_add_acl_hdr(skb, chan->handle, flags);
4537 break;
4538 default:
4539 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4540 return;
4541 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004542
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004543 list = skb_shinfo(skb)->frag_list;
4544 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 /* Non fragmented */
4546 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4547
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004548 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 } else {
4550 /* Fragmented */
4551 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4552
4553 skb_shinfo(skb)->frag_list = NULL;
4554
4555 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004556 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004558 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004559
4560 flags &= ~ACL_START;
4561 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004562 do {
4563 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004564
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004565 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004566 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004567
4568 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4569
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004570 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571 } while (list);
4572
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004573 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004574 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004575}
4576
/* Queue outgoing ACL data on a channel and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587
4588/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004589void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004590{
4591 struct hci_dev *hdev = conn->hdev;
4592 struct hci_sco_hdr hdr;
4593
4594 BT_DBG("%s len %d", hdev->name, skb->len);
4595
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004596 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597 hdr.dlen = skb->len;
4598
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004599 skb_push(skb, HCI_SCO_HDR_SIZE);
4600 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004601 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004603 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004604
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004606 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004607}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608
4609/* ---- HCI TX task (outgoing data) ---- */
4610
4611/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004612static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4613 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004614{
4615 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004616 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004617 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004619 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004621
4622 rcu_read_lock();
4623
4624 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004625 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004626 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004627
4628 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4629 continue;
4630
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631 num++;
4632
4633 if (c->sent < min) {
4634 min = c->sent;
4635 conn = c;
4636 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004637
4638 if (hci_conn_num(hdev, type) == num)
4639 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004640 }
4641
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004642 rcu_read_unlock();
4643
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004645 int cnt, q;
4646
4647 switch (conn->type) {
4648 case ACL_LINK:
4649 cnt = hdev->acl_cnt;
4650 break;
4651 case SCO_LINK:
4652 case ESCO_LINK:
4653 cnt = hdev->sco_cnt;
4654 break;
4655 case LE_LINK:
4656 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4657 break;
4658 default:
4659 cnt = 0;
4660 BT_ERR("Unknown link type");
4661 }
4662
4663 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004664 *quote = q ? q : 1;
4665 } else
4666 *quote = 0;
4667
4668 BT_DBG("conn %p quote %d", conn, *quote);
4669 return conn;
4670}
4671
/* TX timeout handler: the controller stopped returning buffer credits
 * for @type links, so forcibly disconnect every connection of that
 * type that still has unacked packets (c->sent != 0).
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4692
/* HCI channel scheduler: among all channels on connections of @type,
 * pick the channel whose head packet has the highest priority; ties
 * are broken in favor of the connection with the fewest packets in
 * flight. *quote receives the per-round TX budget: the free buffer
 * credits divided among the channels competing at the winning
 * priority, minimum 1.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Channels below the best priority seen so far
			 * are out of the running.
			 */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-sent connection wins within a priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Derive the budget from the buffer pool of the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4774
/* Priority aging: after a TX round, promote the head packet of any
 * channel that has queued data but sent nothing (chan->sent == 0) to
 * priority HCI_PRIO_MAX - 1 so it cannot be starved indefinitely by
 * higher-priority traffic. Channels that did send just have their
 * per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4824
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004825static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4826{
4827 /* Calculate count of blocks used by this packet */
4828 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4829}
4830
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004831static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004832{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004833 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 /* ACL tx timeout must be longer than maximum
4835 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004836 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004837 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004838 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004839 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004840}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004841
/* Send queued ACL data under packet-based flow control. Channels are
 * picked by hci_chan_sent() (highest head priority, least sent) and
 * drained up to their quota, stopping early if a lower-priority packet
 * reaches the head of the channel queue.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One packet = one controller buffer credit */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, promote starved channels for next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4879
/* Send queued ACL data under block-based flow control. Each packet
 * consumes one or more controller data blocks (__get_blocks()); both
 * the channel quota and hdev->block_cnt are charged in blocks rather
 * than packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry their ACL data over AMP links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Packet needs more blocks than remain: give up
			 * for this round entirely.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If anything was sent, promote starved channels for next round */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4933
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004934static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004935{
4936 BT_DBG("%s", hdev->name);
4937
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004938 /* No ACL link over BR/EDR controller */
4939 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4940 return;
4941
4942 /* No AMP link over AMP controller */
4943 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004944 return;
4945
4946 switch (hdev->flow_ctl_mode) {
4947 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4948 hci_sched_acl_pkt(hdev);
4949 break;
4950
4951 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4952 hci_sched_acl_blk(hdev);
4953 break;
4954 }
4955}
4956
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004958static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959{
4960 struct hci_conn *conn;
4961 struct sk_buff *skb;
4962 int quote;
4963
4964 BT_DBG("%s", hdev->name);
4965
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004966 if (!hci_conn_num(hdev, SCO_LINK))
4967 return;
4968
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4970 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4971 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004972 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004973
4974 conn->sent++;
4975 if (conn->sent == ~0)
4976 conn->sent = 0;
4977 }
4978 }
4979}
4980
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004981static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004982{
4983 struct hci_conn *conn;
4984 struct sk_buff *skb;
4985 int quote;
4986
4987 BT_DBG("%s", hdev->name);
4988
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004989 if (!hci_conn_num(hdev, ESCO_LINK))
4990 return;
4991
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004992 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4993 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004994 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4995 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004996 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004997
4998 conn->sent++;
4999 if (conn->sent == ~0)
5000 conn->sent = 0;
5001 }
5002 }
5003}
5004
/* Send queued LE data. If the controller has no dedicated LE buffers
 * (le_pkts == 0) the ACL credit pool is used instead, and the leftover
 * credits are written back to whichever counter they were drawn from.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Draw credits from the LE pool, or the ACL pool as fallback */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, promote starved channels for next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5055
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005056static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005057{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005058 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005059 struct sk_buff *skb;
5060
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005061 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005062 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005063
Marcel Holtmann52de5992013-09-03 18:08:38 -07005064 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5065 /* Schedule queues and send stuff to HCI driver */
5066 hci_sched_acl(hdev);
5067 hci_sched_sco(hdev);
5068 hci_sched_esco(hdev);
5069 hci_sched_le(hdev);
5070 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005071
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072 /* Send next queued raw (unknown type) packet */
5073 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005074 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075}
5076
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005077/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078
5079/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005080static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081{
5082 struct hci_acl_hdr *hdr = (void *) skb->data;
5083 struct hci_conn *conn;
5084 __u16 handle, flags;
5085
5086 skb_pull(skb, HCI_ACL_HDR_SIZE);
5087
5088 handle = __le16_to_cpu(hdr->handle);
5089 flags = hci_flags(handle);
5090 handle = hci_handle(handle);
5091
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005092 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005093 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094
5095 hdev->stat.acl_rx++;
5096
5097 hci_dev_lock(hdev);
5098 conn = hci_conn_hash_lookup_handle(hdev, handle);
5099 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005100
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005102 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005103
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005105 l2cap_recv_acldata(conn, skb, flags);
5106 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005108 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005109 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005110 }
5111
5112 kfree_skb(skb);
5113}
5114
5115/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005116static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117{
5118 struct hci_sco_hdr *hdr = (void *) skb->data;
5119 struct hci_conn *conn;
5120 __u16 handle;
5121
5122 skb_pull(skb, HCI_SCO_HDR_SIZE);
5123
5124 handle = __le16_to_cpu(hdr->handle);
5125
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005126 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127
5128 hdev->stat.sco_rx++;
5129
5130 hci_dev_lock(hdev);
5131 conn = hci_conn_hash_lookup_handle(hdev, handle);
5132 hci_dev_unlock(hdev);
5133
5134 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005136 sco_recv_scodata(conn, skb);
5137 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005139 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005140 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141 }
5142
5143 kfree_skb(skb);
5144}
5145
Johan Hedberg9238f362013-03-05 20:37:48 +02005146static bool hci_req_is_complete(struct hci_dev *hdev)
5147{
5148 struct sk_buff *skb;
5149
5150 skb = skb_peek(&hdev->cmd_q);
5151 if (!skb)
5152 return true;
5153
5154 return bt_cb(skb)->req.start;
5155}
5156
Johan Hedberg42c6b122013-03-05 20:37:49 +02005157static void hci_resend_last(struct hci_dev *hdev)
5158{
5159 struct hci_command_hdr *sent;
5160 struct sk_buff *skb;
5161 u16 opcode;
5162
5163 if (!hdev->sent_cmd)
5164 return;
5165
5166 sent = (void *) hdev->sent_cmd->data;
5167 opcode = __le16_to_cpu(sent->opcode);
5168 if (opcode == HCI_OP_RESET)
5169 return;
5170
5171 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5172 if (!skb)
5173 return;
5174
5175 skb_queue_head(&hdev->cmd_q, skb);
5176 queue_work(hdev->workqueue, &hdev->cmd_work);
5177}
5178
/* Called when a command completes (or reports status): decide whether
 * the HCI request the command belongs to is finished and, if so, find
 * its completion callback -- either on hdev->sent_cmd (request ended
 * normally) or on a queued command while flushing the remainder of a
 * failed request -- and invoke it exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Head of the next request reached: put it back and stop */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5244
/* RX work: drain hdev->rx_q, mirroring every packet to the monitor
 * (and to raw sockets when in promiscuous mode) before dispatching it
 * to the handler for its packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In user channel mode the kernel stack does not
		 * process packets; drop after the copies above.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5299
/* Command work: when a command credit is available, send the next
 * queued HCI command, keep a clone in hdev->sent_cmd for completion
 * matching, and (re)arm the command timeout.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept copy, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005331
5332void hci_req_add_le_scan_disable(struct hci_request *req)
5333{
5334 struct hci_cp_le_set_scan_enable cp;
5335
5336 memset(&cp, 0, sizeof(cp));
5337 cp.enable = LE_SCAN_DISABLE;
5338 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5339}
Andre Guedesa4790db2014-02-26 20:21:47 -03005340
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005341void hci_req_add_le_passive_scan(struct hci_request *req)
5342{
5343 struct hci_cp_le_set_scan_param param_cp;
5344 struct hci_cp_le_set_scan_enable enable_cp;
5345 struct hci_dev *hdev = req->hdev;
5346 u8 own_addr_type;
5347
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005348 /* Set require_privacy to false since no SCAN_REQ are send
5349 * during passive scanning. Not using an unresolvable address
5350 * here is important so that peer devices using direct
5351 * advertising with our address will be correctly reported
5352 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005353 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005354 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005355 return;
5356
5357 memset(&param_cp, 0, sizeof(param_cp));
5358 param_cp.type = LE_SCAN_PASSIVE;
5359 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5360 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5361 param_cp.own_address_type = own_addr_type;
5362 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5363 &param_cp);
5364
5365 memset(&enable_cp, 0, sizeof(enable_cp));
5366 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005367 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005368 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5369 &enable_cp);
5370}
5371
Andre Guedesa4790db2014-02-26 20:21:47 -03005372static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5373{
5374 if (status)
5375 BT_DBG("HCI request failed to update background scanning: "
5376 "status 0x%2.2x", status);
5377}
5378
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* No scanning changes while the device is down, still being
	 * set up or configured, or on its way out.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}