/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

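/* Exposes the Device Under Test (DUT) mode state as a one-character
 * boolean debugfs attribute. Reading returns 'Y' or 'N'; writing a
 * boolean enables DUT mode via HCI_OP_ENABLE_DUT_MODE or leaves it by
 * resetting the controller, since there is no dedicated disable command.
 *
 * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug
 * and the controller is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */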
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

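/* Dumps one line of eight hex octets per supported features page, plus
 * an "LE:" line with the LE feature octets on LE capable controllers.
 * An output line looks roughly like this (values illustrative only):
 *
 *	 0: 0xff 0xfe 0x8f 0xfe 0xdb 0xff 0x5b 0x87
 */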
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

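/* Read-only u64 attribute; DEFINE_SIMPLE_ATTRIBUTE() below generates the
 * file operations that print the value with "0x%4.4llx\n". Passing NULL
 * for the set callback is what makes the attribute read-only.
 */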
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

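/* The idle timeout is in milliseconds; 0 disables it, otherwise values
 * between 500 ms and 3600000 ms (one hour) are accepted.
 */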
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

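/* Sniff intervals are expressed in baseband slots (0.625 ms units) and
 * must be even and non-zero, with the minimum not exceeding the maximum.
 */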
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

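/* LE connection intervals are in 1.25 ms units; the spec-defined range
 * is 0x0006 (7.5 ms) to 0x0c80 (4 s), and min must not exceed max.
 */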
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

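/* Slave latency is a count of connection events the slave may skip;
 * 0x01f3 (499 events) is the maximum allowed by the specification.
 */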
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

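/* The supervision timeout is in 10 ms units; the valid range is 0x000a
 * (100 ms) to 0x0c80 (32 seconds).
 */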
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

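/* The advertising channel map is a three-bit mask: bit 0 selects
 * channel 37, bit 1 channel 38 and bit 2 channel 39, so at least one
 * bit (0x01..0x07) must be set.
 */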
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

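/* Synchronous requests are built on a simple handshake: the submitter
 * sets hdev->req_status to HCI_REQ_PEND and sleeps on hdev->req_wait_q;
 * the completion (or cancel) path stores the result, flips req_status
 * to HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the sleeper.
 */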
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

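/* Consumes the last received event (hdev->recv_evt) and validates that
 * it is either the explicitly requested event or a Command Complete for
 * the given opcode; anything else is freed and reported as -ENODATA.
 */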
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

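/* Controller bring-up is staged: init1 resets the controller and reads
 * the basic identity information, and the later init stages use what
 * was learned (features, supported commands) to decide which additional
 * setup commands to issue.
 */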
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

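/* Pick the best supported inquiry mode: 0x02 selects inquiry results
 * with extended inquiry response, 0x01 inquiry results with RSSI and
 * 0x00 the standard format. The manufacturer/revision checks whitelist
 * some older controllers that support RSSI results without advertising
 * the corresponding LMP feature bit.
 */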
1331static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1332{
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1335
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1338
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1342
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1350 }
1351
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1355
1356 return 0x00;
1357}
1358
Johan Hedberg42c6b122013-03-05 20:37:49 +02001359static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001360{
1361 u8 mode;
1362
Johan Hedberg42c6b122013-03-05 20:37:49 +02001363 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001364
Johan Hedberg42c6b122013-03-05 20:37:49 +02001365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001366}
1367
Johan Hedberg42c6b122013-03-05 20:37:49 +02001368static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001369{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001370 struct hci_dev *hdev = req->hdev;
1371
Johan Hedberg2177bab2013-03-05 20:37:43 +02001372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1375 */
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1377
1378 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1380 */
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1383
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001402 }
1403
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1409
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1412
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1415
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1418
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1421
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1433 */
1434 }
1435
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1438
Johan Hedberg42c6b122013-03-05 20:37:49 +02001439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001440}
1441
Johan Hedberg42c6b122013-03-05 20:37:49 +02001442static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001443{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001444 struct hci_dev *hdev = req->hdev;
1445
Johan Hedberg2177bab2013-03-05 20:37:43 +02001446 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001447 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001450
1451 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001452 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001453
Johan Hedberg42c6b122013-03-05 20:37:49 +02001454 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001455
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1458 */
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001461
1462 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001463 /* When SSP is available, then the host features page
1464 * should also be available as well. However some
1465 * controllers list the max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force the minimum max_page to 1 at least.
1468 */
1469 hdev->max_page = 0x01;
1470
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001475 } else {
1476 struct hci_cp_write_eir cp;
1477
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1480
Johan Hedberg42c6b122013-03-05 20:37:49 +02001481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001482 }
1483 }
1484
1485 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001487
1488 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001490
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1493
1494 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001497 }
1498
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001503 }
1504}
1505
Johan Hedberg42c6b122013-03-05 20:37:49 +02001506static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001507{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523}
1524
Johan Hedberg42c6b122013-03-05 20:37:49 +02001525static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001526{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528 struct hci_cp_write_le_host_supported cp;
1529
Johan Hedbergc73eee92013-04-19 18:35:21 +03001530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
Johan Hedberg2177bab2013-03-05 20:37:43 +02001534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001544}
1545
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

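/* Worked example (not part of the original file): a controller that is
 * CSB master capable and ping capable, but not CSB slave capable, ends
 * up with
 *
 *	events[1] = 0x40 | 0x80 = 0xc0
 *	events[2] = 0x10 | 0x20 | 0x80 = 0xb0
 *
 * and all other octets left at 0x00.
 */
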
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x1f;

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

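/* Note (not part of the original file): hdev->commands[] mirrors the
 * Supported Commands bit mask returned by Read Local Supported Commands,
 * indexed as commands[octet] & (1 << bit). The two checks above therefore
 * test the bits the spec assigns to Delete Stored Link Key (octet 6,
 * bit 7) and Write Default Link Policy Settings (octet 5, bit 4).
 */
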
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
				    &device_list_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

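/* Note (summary, not part of the original file): bring-up is staged on
 * purpose: init1 resets the controller and reads basic capabilities,
 * init2 applies common settings, init3 keys off what init2 discovered
 * (supported commands, LE features) and init4 keys off init3 (extended
 * feature pages, Secure Connections). Because each stage runs as a
 * synchronous request, every stage can safely rely on the hdev state
 * that the previous one filled in.
 */
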
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

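/* Note (not part of the original file): these helpers receive their
 * payload through the opt argument of hci_req_sync(). For the Write
 * Scan Enable command the standard parameter values are
 *
 *	0x00	scans disabled
 *	0x01	inquiry scan only
 *	0x02	page scan only
 *	0x03	inquiry and page scan
 *
 * so e.g. "hciconfig hci0 piscan" ends up here with opt = 0x03 via the
 * HCISETSCAN ioctl below.
 */
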
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

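/* Usage sketch (not part of the original file): every successful
 * hci_dev_get() takes a reference and must be balanced by
 * hci_dev_put(), typically through a done label:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 *
 * The ioctl helpers later in this file all follow this pattern.
 */
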
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

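/* Note (summary, not part of the original file): a full discovery cycle
 * moves STOPPED -> STARTING -> FINDING -> RESOLVING -> STOPPING ->
 * STOPPED. Userspace is only told about the transitions it cares about:
 * entering FINDING reports "discovering", and entering STOPPED reports
 * "not discovering" unless the cycle was aborted while still STARTING.
 */
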
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

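/* Note (not part of the original file): the resolve list is kept ordered
 * by ascending |rssi| (RSSI is in negative dBm, so a smaller magnitude
 * means a stronger signal), which makes name resolution start with the
 * closest devices. Entries already in NAME_PENDING are skipped over
 * rather than displaced. For example, with entries at -40, -60 and
 * -80 dBm, a new entry at -50 dBm is inserted between -40 and -60.
 */
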
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

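/* Note (summary, not part of the original file): the returned flags are
 * passed on with the mgmt Device Found event. MGMT_DEV_FOUND_CONFIRM_NAME
 * asks userspace to confirm or resolve the remote name (also the safe
 * fallback when no cache entry could be allocated), and
 * MGMT_DEV_FOUND_LEGACY_PAIRING marks devices that did not report SSP
 * support in either the fresh result or the cached entry.
 */
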
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until the Inquiry procedure finishes (HCI_INQUIRY flag
		 * is cleared). If it is interrupted by a signal, return
		 * -EINTR (dropping the device reference taken above).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

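/* Usage sketch (not part of the original file): this implements the
 * HCIINQUIRY ioctl on a raw HCI socket. A userspace caller supplies a
 * struct hci_inquiry_req header followed by room for the responses,
 * roughly:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 *
 * 0x9e8b33 is the General Inquiry Access Code stored little-endian;
 * length is in 1.28 s units, and the 2000 ms per unit used for timeo
 * above adds slack on top of that.
 */
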
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list)
		list_del_init(&p->action);

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

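/* Note (not part of the original file): HCISETACLMTU and HCISETSCOMTU
 * pack two values into the 32-bit dev_opt: the pointer arithmetic above
 * takes the MTU from the upper 16 bits and the packet count from the
 * lower 16 bits (on a little-endian host). For example, an ACL MTU of
 * 1021 with 8 packets would be passed as
 *
 *	dr.dev_opt = (1021 << 16) | 8;
 */
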
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

Johan Hedbergab81cbf2010-12-15 13:53:18 +02002845static void hci_power_on(struct work_struct *work)
2846{
2847 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002848 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002849
2850 BT_DBG("%s", hdev->name);
2851
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002852 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002853 if (err < 0) {
2854 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002855 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002856 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002857
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002858 /* During the HCI setup phase, a few error conditions are
2859 * ignored and they need to be checked now. If they are still
2860 * valid, it is important to turn the device back off.
2861 */
2862 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02002863 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002864 (hdev->dev_type == HCI_BREDR &&
2865 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2866 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002867 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2868 hci_dev_do_close(hdev);
2869 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002870 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2871 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002872 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002873
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002874 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002875 /* For unconfigured devices, set the HCI_RAW flag
2876 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02002877 */
2878 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2879 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02002880
2881 /* For fully configured devices, this will send
2882 * the Index Added event. For unconfigured devices,
 2883	 * it will send an Unconfigured Index Added event.
2884 *
2885 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
 2886	 * and no event will be sent.
2887 */
2888 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002889 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02002890	/* Now that the controller is configured, it
 2891	 * is important to clear the HCI_RAW flag.
2892 */
2893 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2894 clear_bit(HCI_RAW, &hdev->flags);
2895
Marcel Holtmannd603b762014-07-06 12:11:14 +02002896 /* Powering on the controller with HCI_CONFIG set only
2897 * happens with the transition from unconfigured to
2898 * configured. This will send the Index Added event.
2899 */
2900 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002901 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002902}
2903
2904static void hci_power_off(struct work_struct *work)
2905{
Johan Hedberg32435532011-11-07 22:16:04 +02002906 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002907 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002908
2909 BT_DBG("%s", hdev->name);
2910
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002911 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002912}
2913
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002914static void hci_discov_off(struct work_struct *work)
2915{
2916 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002917
2918 hdev = container_of(work, struct hci_dev, discov_off.work);
2919
2920 BT_DBG("%s", hdev->name);
2921
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002922 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002923}
2924
Johan Hedberg35f74982014-02-18 17:14:32 +02002925void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002926{
Johan Hedberg48210022013-01-27 00:31:28 +02002927 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002928
Johan Hedberg48210022013-01-27 00:31:28 +02002929 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2930 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002931 kfree(uuid);
2932 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002933}
2934
Johan Hedberg35f74982014-02-18 17:14:32 +02002935void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002936{
2937 struct list_head *p, *n;
2938
2939 list_for_each_safe(p, n, &hdev->link_keys) {
2940 struct link_key *key;
2941
2942 key = list_entry(p, struct link_key, list);
2943
2944 list_del(p);
2945 kfree(key);
2946 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002947}
2948
Johan Hedberg35f74982014-02-18 17:14:32 +02002949void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002950{
2951 struct smp_ltk *k, *tmp;
2952
2953 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2954 list_del(&k->list);
2955 kfree(k);
2956 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002957}
2958
Johan Hedberg970c4e42014-02-18 10:19:33 +02002959void hci_smp_irks_clear(struct hci_dev *hdev)
2960{
2961 struct smp_irk *k, *tmp;
2962
2963 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2964 list_del(&k->list);
2965 kfree(k);
2966 }
2967}
2968
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002969struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2970{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002971 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002972
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002973 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002974 if (bacmp(bdaddr, &k->bdaddr) == 0)
2975 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002976
2977 return NULL;
2978}
2979
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302980static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002981 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002982{
2983 /* Legacy key */
2984 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302985 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002986
2987 /* Debug keys are insecure so don't store them persistently */
2988 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302989 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002990
2991 /* Changed combination key and there's no previous one */
2992 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302993 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002994
2995 /* Security mode 3 case */
2996 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302997 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002998
 2999	/* Neither the local nor the remote side requested no-bonding */
3000 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303001 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003002
3003 /* Local side had dedicated bonding as requirement */
3004 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303005 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003006
3007 /* Remote side had dedicated bonding as requirement */
3008 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303009 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003010
3011 /* If none of the above criteria match, then don't store the key
3012 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303013 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003014}
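
/* Worked examples for the rules above (illustrative, not exhaustive):
 * a combination key (0x00) from legacy pairing is always stored; a
 * debug combination key (0x03) never is; an unauthenticated combination
 * key arriving without a connection object (security mode 3) is
 * stored; the same key from a connection where both sides asked for
 * no-bonding (auth_type and remote_auth 0x00 or 0x01) falls through
 * every check and is dropped.
 */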
3015
Johan Hedberg98a0b842014-01-30 19:40:00 -08003016static bool ltk_type_master(u8 type)
3017{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03003018 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08003019}
3020
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003021struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003022 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003023{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003024 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003025
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003026 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003027 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003028 continue;
3029
Johan Hedberg98a0b842014-01-30 19:40:00 -08003030 if (ltk_type_master(k->type) != master)
3031 continue;
3032
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003033 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003034 }
3035
3036 return NULL;
3037}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003038
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003039struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08003040 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003041{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003042 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003043
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003044 list_for_each_entry(k, &hdev->long_term_keys, list)
3045 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003046 bacmp(bdaddr, &k->bdaddr) == 0 &&
3047 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003048 return k;
3049
3050 return NULL;
3051}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003052
Johan Hedberg970c4e42014-02-18 10:19:33 +02003053struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3054{
3055 struct smp_irk *irk;
3056
3057 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3058 if (!bacmp(&irk->rpa, rpa))
3059 return irk;
3060 }
3061
3062 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3063 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3064 bacpy(&irk->rpa, rpa);
3065 return irk;
3066 }
3067 }
3068
3069 return NULL;
3070}
3071
3072struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3073 u8 addr_type)
3074{
3075 struct smp_irk *irk;
3076
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003077 /* Identity Address must be public or static random */
3078 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3079 return NULL;
3080
Johan Hedberg970c4e42014-02-18 10:19:33 +02003081 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3082 if (addr_type == irk->addr_type &&
3083 bacmp(bdaddr, &irk->bdaddr) == 0)
3084 return irk;
3085 }
3086
3087 return NULL;
3088}
3089
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003090struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003091 bdaddr_t *bdaddr, u8 *val, u8 type,
3092 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003093{
3094 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303095 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003096
3097 old_key = hci_find_link_key(hdev, bdaddr);
3098 if (old_key) {
3099 old_key_type = old_key->type;
3100 key = old_key;
3101 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003102 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003103 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003104 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003105 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003106 list_add(&key->list, &hdev->link_keys);
3107 }
3108
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003109 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003110
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003111 /* Some buggy controller combinations generate a changed
3112 * combination key for legacy pairing even when there's no
3113 * previous key */
3114 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003115 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003116 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003117 if (conn)
3118 conn->key_type = type;
3119 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003120
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003121 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003122 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003123 key->pin_len = pin_len;
3124
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003125 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003126 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003127 else
3128 key->type = type;
3129
Johan Hedberg7652ff62014-06-24 13:15:49 +03003130 if (persistent)
3131 *persistent = hci_persistent_key(hdev, conn, type,
3132 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003133
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003134 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003135}
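
/* Illustrative caller sketch (hypothetical helper, hdev->lock held):
 * this is roughly how the HCI event path stores a freshly received key
 * and consults the persistent flag afterwards.
 */
static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 *val, u8 key_type, u8 pin_len)
{
	struct link_key *key;
	bool persistent;

	key = hci_add_link_key(hdev, conn, bdaddr, val, key_type, pin_len,
			       &persistent);
	if (!key)
		return;

	BT_DBG("%s stored key for %pMR persistent %d", hdev->name, bdaddr,
	       persistent);
}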
3136
Johan Hedbergca9142b2014-02-19 14:57:44 +02003137struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003138 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003139 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003140{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003141 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003142 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003143
Johan Hedberg98a0b842014-01-30 19:40:00 -08003144 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003145 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003146 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003147 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003148 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003149 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003150 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003151 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003152 }
3153
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003154 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003155 key->bdaddr_type = addr_type;
3156 memcpy(key->val, tk, sizeof(key->val));
3157 key->authenticated = authenticated;
3158 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003159 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003160 key->enc_size = enc_size;
3161 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003162
Johan Hedbergca9142b2014-02-19 14:57:44 +02003163 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003164}
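
/* Illustrative caller sketch (hypothetical helper, hdev->lock held):
 * SMP key distribution would persist a master LTK roughly like this
 * and find it again by EDIV/Rand when the link is re-encrypted later.
 */
static void example_store_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 addr_type, u8 tk[16], __le16 ediv,
			      __le64 rand)
{
	struct smp_ltk *ltk;

	ltk = hci_add_ltk(hdev, bdaddr, addr_type, SMP_LTK,
			  0 /* unauthenticated */, tk, 16, ediv, rand);
	if (!ltk)
		return;

	/* Later, when the link is encrypted again */
	ltk = hci_find_ltk(hdev, ediv, rand, true /* master */);
}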
3165
Johan Hedbergca9142b2014-02-19 14:57:44 +02003166struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3167 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003168{
3169 struct smp_irk *irk;
3170
3171 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3172 if (!irk) {
3173 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3174 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003175 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003176
3177 bacpy(&irk->bdaddr, bdaddr);
3178 irk->addr_type = addr_type;
3179
3180 list_add(&irk->list, &hdev->identity_resolving_keys);
3181 }
3182
3183 memcpy(irk->val, val, 16);
3184 bacpy(&irk->rpa, rpa);
3185
Johan Hedbergca9142b2014-02-19 14:57:44 +02003186 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003187}
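
/* Illustrative sketch (hypothetical helper, hdev->lock held): store a
 * distributed IRK for a peer's identity address and use it afterwards
 * to resolve RPAs seen in advertising reports.
 */
static void example_irk_flow(struct hci_dev *hdev, bdaddr_t *identity,
			     u8 irk_val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* No current RPA known yet, so pass BDADDR_ANY */
	hci_add_irk(hdev, identity, ADDR_LE_DEV_PUBLIC, irk_val, BDADDR_ANY);

	/* Resolve an incoming RPA back to the stored identity */
	irk = hci_find_irk_by_rpa(hdev, rpa);
	if (irk)
		BT_DBG("%pMR resolves to %pMR", rpa, &irk->bdaddr);
}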
3188
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003189int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3190{
3191 struct link_key *key;
3192
3193 key = hci_find_link_key(hdev, bdaddr);
3194 if (!key)
3195 return -ENOENT;
3196
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003197 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003198
3199 list_del(&key->list);
3200 kfree(key);
3201
3202 return 0;
3203}
3204
Johan Hedberge0b2b272014-02-18 17:14:31 +02003205int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003206{
3207 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003208 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003209
3210 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003211 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003212 continue;
3213
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003214 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003215
3216 list_del(&k->list);
3217 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003218 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003219 }
3220
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003221 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003222}
3223
Johan Hedberga7ec7332014-02-18 17:14:35 +02003224void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3225{
3226 struct smp_irk *k, *tmp;
3227
Johan Hedberg668b7b12014-02-21 16:03:31 +02003228 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003229 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3230 continue;
3231
3232 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3233
3234 list_del(&k->list);
3235 kfree(k);
3236 }
3237}
3238
Ville Tervo6bd32322011-02-16 16:32:41 +02003239/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003240static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003241{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003242 struct hci_dev *hdev = container_of(work, struct hci_dev,
3243 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003244
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003245 if (hdev->sent_cmd) {
3246 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3247 u16 opcode = __le16_to_cpu(sent->opcode);
3248
3249 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3250 } else {
3251 BT_ERR("%s command tx timeout", hdev->name);
3252 }
3253
Ville Tervo6bd32322011-02-16 16:32:41 +02003254 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003255 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003256}
3257
Szymon Janc2763eda2011-03-22 13:12:22 +01003258struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003259 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003260{
3261 struct oob_data *data;
3262
3263 list_for_each_entry(data, &hdev->remote_oob_data, list)
3264 if (bacmp(bdaddr, &data->bdaddr) == 0)
3265 return data;
3266
3267 return NULL;
3268}
3269
3270int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3271{
3272 struct oob_data *data;
3273
3274 data = hci_find_remote_oob_data(hdev, bdaddr);
3275 if (!data)
3276 return -ENOENT;
3277
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003278 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003279
3280 list_del(&data->list);
3281 kfree(data);
3282
3283 return 0;
3284}
3285
Johan Hedberg35f74982014-02-18 17:14:32 +02003286void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003287{
3288 struct oob_data *data, *n;
3289
3290 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3291 list_del(&data->list);
3292 kfree(data);
3293 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003294}
3295
Marcel Holtmann07988722014-01-10 02:07:29 -08003296int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3297 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003298{
3299 struct oob_data *data;
3300
3301 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003302 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003303 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003304 if (!data)
3305 return -ENOMEM;
3306
3307 bacpy(&data->bdaddr, bdaddr);
3308 list_add(&data->list, &hdev->remote_oob_data);
3309 }
3310
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003311 memcpy(data->hash192, hash, sizeof(data->hash192));
3312 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003313
Marcel Holtmann07988722014-01-10 02:07:29 -08003314 memset(data->hash256, 0, sizeof(data->hash256));
3315 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3316
3317 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3318
3319 return 0;
3320}
3321
3322int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3323 u8 *hash192, u8 *randomizer192,
3324 u8 *hash256, u8 *randomizer256)
3325{
3326 struct oob_data *data;
3327
3328 data = hci_find_remote_oob_data(hdev, bdaddr);
3329 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003330 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003331 if (!data)
3332 return -ENOMEM;
3333
3334 bacpy(&data->bdaddr, bdaddr);
3335 list_add(&data->list, &hdev->remote_oob_data);
3336 }
3337
3338 memcpy(data->hash192, hash192, sizeof(data->hash192));
3339 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3340
3341 memcpy(data->hash256, hash256, sizeof(data->hash256));
3342 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3343
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003344 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003345
3346 return 0;
3347}
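
/* Illustrative sketch: remote OOB pairing data reaches this file via
 * the two functions above; legacy callers provide only the P-192 pair,
 * while Secure Connections capable callers use the extended variant
 * with both pairs. All buffers are 16 bytes.
 */
static int example_store_oob(struct hci_dev *hdev, bdaddr_t *bdaddr,
			     u8 hash192[16], u8 rand192[16])
{
	return hci_add_remote_oob_data(hdev, bdaddr, hash192, rand192);
}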
3348
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003349struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3350 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003351{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003352 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003353
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003354 list_for_each_entry(b, &hdev->blacklist, list) {
3355 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003356 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003357 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003358
3359 return NULL;
3360}
3361
Marcel Holtmannc9507492014-02-27 19:35:54 -08003362static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003363{
3364 struct list_head *p, *n;
3365
3366 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003367 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003368
3369 list_del(p);
3370 kfree(b);
3371 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003372}
3373
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003374int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003375{
3376 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003377
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003378 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003379 return -EBADF;
3380
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003381 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003382 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003383
3384 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003385 if (!entry)
3386 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003387
3388 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003389 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003390
3391 list_add(&entry->list, &hdev->blacklist);
3392
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003393 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003394}
3395
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003396int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003397{
3398 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003399
Johan Hedberg35f74982014-02-18 17:14:32 +02003400 if (!bacmp(bdaddr, BDADDR_ANY)) {
3401 hci_blacklist_clear(hdev);
3402 return 0;
3403 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003404
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003405 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003406 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003407 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003408
3409 list_del(&entry->list);
3410 kfree(entry);
3411
Johan Hedberg2a8357f2014-07-01 22:09:47 +03003412 return 0;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003413}
3414
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003415struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3416 bdaddr_t *bdaddr, u8 type)
3417{
3418 struct bdaddr_list *b;
3419
3420 list_for_each_entry(b, &hdev->le_white_list, list) {
3421 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3422 return b;
3423 }
3424
3425 return NULL;
3426}
3427
3428void hci_white_list_clear(struct hci_dev *hdev)
3429{
3430 struct list_head *p, *n;
3431
3432 list_for_each_safe(p, n, &hdev->le_white_list) {
3433 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3434
3435 list_del(p);
3436 kfree(b);
3437 }
3438}
3439
3440int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3441{
3442 struct bdaddr_list *entry;
3443
3444 if (!bacmp(bdaddr, BDADDR_ANY))
3445 return -EBADF;
3446
3447 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3448 if (!entry)
3449 return -ENOMEM;
3450
3451 bacpy(&entry->bdaddr, bdaddr);
3452 entry->bdaddr_type = type;
3453
3454 list_add(&entry->list, &hdev->le_white_list);
3455
3456 return 0;
3457}
3458
3459int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3460{
3461 struct bdaddr_list *entry;
3462
3463 if (!bacmp(bdaddr, BDADDR_ANY))
3464 return -EBADF;
3465
3466 entry = hci_white_list_lookup(hdev, bdaddr, type);
3467 if (!entry)
3468 return -ENOENT;
3469
3470 list_del(&entry->list);
3471 kfree(entry);
3472
3473 return 0;
3474}
3475
Andre Guedes15819a72014-02-03 13:56:18 -03003476/* This function requires the caller holds hdev->lock */
3477struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3478 bdaddr_t *addr, u8 addr_type)
3479{
3480 struct hci_conn_params *params;
3481
Johan Hedberg738f6182014-07-03 19:33:51 +03003482 /* The conn params list only contains identity addresses */
3483 if (!hci_is_identity_address(addr, addr_type))
3484 return NULL;
3485
Andre Guedes15819a72014-02-03 13:56:18 -03003486 list_for_each_entry(params, &hdev->le_conn_params, list) {
3487 if (bacmp(&params->addr, addr) == 0 &&
3488 params->addr_type == addr_type) {
3489 return params;
3490 }
3491 }
3492
3493 return NULL;
3494}
3495
Andre Guedescef952c2014-02-26 20:21:49 -03003496static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3497{
3498 struct hci_conn *conn;
3499
3500 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3501 if (!conn)
3502 return false;
3503
3504 if (conn->dst_type != type)
3505 return false;
3506
3507 if (conn->state != BT_CONNECTED)
3508 return false;
3509
3510 return true;
3511}
3512
Andre Guedes15819a72014-02-03 13:56:18 -03003513/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003514struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3515 bdaddr_t *addr, u8 addr_type)
Marcel Holtmann4b109662014-06-29 13:41:49 +02003516{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003517 struct hci_conn_params *param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003518
Johan Hedberg738f6182014-07-03 19:33:51 +03003519 /* The list only contains identity addresses */
3520 if (!hci_is_identity_address(addr, addr_type))
3521 return NULL;
3522
Johan Hedberg501f8822014-07-04 12:37:26 +03003523 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003524 if (bacmp(&param->addr, addr) == 0 &&
3525 param->addr_type == addr_type)
3526 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003527 }
3528
3529 return NULL;
3530}
3531
3532/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003533struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3534 bdaddr_t *addr, u8 addr_type)
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003535{
3536 struct hci_conn_params *params;
3537
Johan Hedbergc46245b2014-07-02 17:37:33 +03003538 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003539 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003540
3541 params = hci_conn_params_lookup(hdev, addr, addr_type);
3542 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003543 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003544
3545 params = kzalloc(sizeof(*params), GFP_KERNEL);
3546 if (!params) {
3547 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003548 return NULL;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003549 }
3550
3551 bacpy(&params->addr, addr);
3552 params->addr_type = addr_type;
3553
3554 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003555 INIT_LIST_HEAD(&params->action);
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003556
3557 params->conn_min_interval = hdev->le_conn_min_interval;
3558 params->conn_max_interval = hdev->le_conn_max_interval;
3559 params->conn_latency = hdev->le_conn_latency;
3560 params->supervision_timeout = hdev->le_supv_timeout;
3561 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3562
3563 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3564
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003565 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003566}
3567
3568/* This function requires the caller holds hdev->lock */
3569int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003570 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003571{
3572 struct hci_conn_params *params;
3573
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003574 params = hci_conn_params_add(hdev, addr, addr_type);
3575 if (!params)
3576 return -EIO;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003577
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003578 if (params->auto_connect == auto_connect)
3579 return 0;
3580
Johan Hedberg95305ba2014-07-04 12:37:21 +03003581 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003582
Andre Guedescef952c2014-02-26 20:21:49 -03003583 switch (auto_connect) {
3584 case HCI_AUTO_CONN_DISABLED:
3585 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003586 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003587 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003588 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003589 list_add(&params->action, &hdev->pend_le_reports);
3590 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003591 break;
Andre Guedescef952c2014-02-26 20:21:49 -03003592 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003593 if (!is_connected(hdev, addr, addr_type)) {
3594 list_add(&params->action, &hdev->pend_le_conns);
3595 hci_update_background_scan(hdev);
3596 }
Andre Guedescef952c2014-02-26 20:21:49 -03003597 break;
3598 }
Andre Guedes15819a72014-02-03 13:56:18 -03003599
Johan Hedberg851efca2014-07-02 22:42:00 +03003600 params->auto_connect = auto_connect;
3601
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003602 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3603 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003604
3605 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003606}
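
/* Illustrative sketch (hypothetical helper, hdev->lock held): mark a
 * peer for autonomous reconnection. With HCI_AUTO_CONN_ALWAYS the
 * entry lands on pend_le_conns and the background scan is refreshed,
 * so a subsequent advertising report triggers a connection attempt.
 */
static int example_enable_auto_connect(struct hci_dev *hdev,
				       bdaddr_t *identity)
{
	/* Identity addresses only; RPAs are rejected by the add path */
	return hci_conn_params_set(hdev, identity, ADDR_LE_DEV_PUBLIC,
				   HCI_AUTO_CONN_ALWAYS);
}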
3607
3608/* This function requires the caller holds hdev->lock */
3609void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3610{
3611 struct hci_conn_params *params;
3612
3613 params = hci_conn_params_lookup(hdev, addr, addr_type);
3614 if (!params)
3615 return;
3616
Johan Hedberg95305ba2014-07-04 12:37:21 +03003617 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003618 list_del(&params->list);
3619 kfree(params);
3620
Johan Hedberg95305ba2014-07-04 12:37:21 +03003621 hci_update_background_scan(hdev);
3622
Andre Guedes15819a72014-02-03 13:56:18 -03003623 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3624}
3625
3626/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003627void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3628{
3629 struct hci_conn_params *params, *tmp;
3630
3631 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3632 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3633 continue;
3634 list_del(&params->list);
3635 kfree(params);
3636 }
3637
3638 BT_DBG("All LE disabled connection parameters were removed");
3639}
3640
3641/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003642void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003643{
3644 struct hci_conn_params *params, *tmp;
3645
3646 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberga2f41a82014-07-04 12:37:19 +03003647 list_del(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003648 list_del(&params->list);
3649 kfree(params);
3650 }
3651
Johan Hedberga2f41a82014-07-04 12:37:19 +03003652 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003653
Andre Guedes15819a72014-02-03 13:56:18 -03003654 BT_DBG("All LE connection parameters were removed");
3655}
3656
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003657static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003658{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003659 if (status) {
3660 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003661
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003662 hci_dev_lock(hdev);
3663 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3664 hci_dev_unlock(hdev);
3665 return;
3666 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003667}
3668
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003669static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003670{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003671 /* General inquiry access code (GIAC) */
3672 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3673 struct hci_request req;
3674 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003675 int err;
3676
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003677 if (status) {
3678 BT_ERR("Failed to disable LE scanning: status %d", status);
3679 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003680 }
3681
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003682 switch (hdev->discovery.type) {
3683 case DISCOV_TYPE_LE:
3684 hci_dev_lock(hdev);
3685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3686 hci_dev_unlock(hdev);
3687 break;
3688
3689 case DISCOV_TYPE_INTERLEAVED:
3690 hci_req_init(&req, hdev);
3691
3692 memset(&cp, 0, sizeof(cp));
3693 memcpy(&cp.lap, lap, sizeof(cp.lap));
3694 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3695 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3696
3697 hci_dev_lock(hdev);
3698
3699 hci_inquiry_cache_flush(hdev);
3700
3701 err = hci_req_run(&req, inquiry_complete);
3702 if (err) {
3703 BT_ERR("Inquiry request failed: err %d", err);
3704 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3705 }
3706
3707 hci_dev_unlock(hdev);
3708 break;
3709 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003710}
3711
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003712static void le_scan_disable_work(struct work_struct *work)
3713{
3714 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003715 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003716 struct hci_request req;
3717 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003718
3719 BT_DBG("%s", hdev->name);
3720
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003721 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003722
Andre Guedesb1efcc22014-02-26 20:21:40 -03003723 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003724
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003725 err = hci_req_run(&req, le_scan_disable_work_complete);
3726 if (err)
3727 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003728}
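
/* The hci_request pattern used throughout this file, in its minimal
 * form (illustrative): queue one or more commands against a request,
 * then run them with an optional completion callback.
 */
static void example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, NULL);
	if (err)
		BT_ERR("Disable LE scan request failed: err %d", err);
}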
3729
Johan Hedberg8d972502014-02-28 12:54:14 +02003730static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3731{
3732 struct hci_dev *hdev = req->hdev;
3733
3734 /* If we're advertising or initiating an LE connection we can't
3735 * go ahead and change the random address at this time. This is
3736 * because the eventual initiator address used for the
3737 * subsequently created connection will be undefined (some
3738 * controllers use the new address and others the one we had
3739 * when the operation started).
3740 *
3741 * In this kind of scenario skip the update and let the random
3742 * address be updated at the next cycle.
3743 */
3744 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3745 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3746 BT_DBG("Deferring random address update");
3747 return;
3748 }
3749
3750 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3751}
3752
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003753int hci_update_random_address(struct hci_request *req, bool require_privacy,
3754 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003755{
3756 struct hci_dev *hdev = req->hdev;
3757 int err;
3758
 3759	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003760	 * the current RPA has expired, or something other than the
 3761	 * current RPA is in use, generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003762 */
3763 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003764 int to;
3765
3766 *own_addr_type = ADDR_LE_DEV_RANDOM;
3767
3768 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003769 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003770 return 0;
3771
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003772 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003773 if (err < 0) {
3774 BT_ERR("%s failed to generate new RPA", hdev->name);
3775 return err;
3776 }
3777
Johan Hedberg8d972502014-02-28 12:54:14 +02003778 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003779
3780 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3781 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3782
3783 return 0;
3784 }
3785
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003786	/* If privacy is required but a resolvable private address cannot
 3787	 * be used, fall back to an unresolvable private address. This is
 3788	 * useful for active scanning and non-connectable advertising.
3789 */
3790 if (require_privacy) {
3791 bdaddr_t urpa;
3792
3793 get_random_bytes(&urpa, 6);
3794 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3795
3796 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003797 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003798 return 0;
3799 }
3800
Johan Hedbergebd3a742014-02-23 19:42:21 +02003801	/* If forcing a static address is in use or there is no public
 3802	 * address, use the static address as the random address (but skip
 3803	 * the HCI command if the current random address is already the
 3804	 * static one).
3805 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003806 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003807 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3808 *own_addr_type = ADDR_LE_DEV_RANDOM;
3809 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3810 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3811 &hdev->static_addr);
3812 return 0;
3813 }
3814
3815 /* Neither privacy nor static address is being used so use a
3816 * public address.
3817 */
3818 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3819
3820 return 0;
3821}
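
/* Illustrative sketch: a request builder picks the own-address type
 * via hci_update_random_address() before issuing an address-carrying
 * LE command. Sketch only; the real users are the advertising,
 * scanning and connection setup paths. Assumes the
 * struct hci_cp_le_set_adv_param definition from hci.h.
 */
static void example_adv_param(struct hci_request *req)
{
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;

	/* false: privacy not strictly required for this use case */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
}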
3822
Johan Hedberga1f4c312014-02-27 14:05:41 +02003823/* Copy the Identity Address of the controller.
3824 *
3825 * If the controller has a public BD_ADDR, then by default use that one.
 3826 * If this is an LE only controller without a public address, default to
3827 * the static random address.
3828 *
3829 * For debugging purposes it is possible to force controllers with a
3830 * public address to use the static random address instead.
3831 */
3832void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3833 u8 *bdaddr_type)
3834{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003835 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003836 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3837 bacpy(bdaddr, &hdev->static_addr);
3838 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3839 } else {
3840 bacpy(bdaddr, &hdev->bdaddr);
3841 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3842 }
3843}
3844
David Herrmann9be0dab2012-04-22 14:39:57 +02003845/* Alloc HCI device */
3846struct hci_dev *hci_alloc_dev(void)
3847{
3848 struct hci_dev *hdev;
3849
3850 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3851 if (!hdev)
3852 return NULL;
3853
David Herrmannb1b813d2012-04-22 14:39:58 +02003854 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3855 hdev->esco_type = (ESCO_HV1);
3856 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003857 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3858 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003859 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003860 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3861 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003862
David Herrmannb1b813d2012-04-22 14:39:58 +02003863 hdev->sniff_max_interval = 800;
3864 hdev->sniff_min_interval = 80;
3865
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003866 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003867 hdev->le_scan_interval = 0x0060;
3868 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003869 hdev->le_conn_min_interval = 0x0028;
3870 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003871 hdev->le_conn_latency = 0x0000;
3872 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003873
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003874 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003875 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003876 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3877 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003878
David Herrmannb1b813d2012-04-22 14:39:58 +02003879 mutex_init(&hdev->lock);
3880 mutex_init(&hdev->req_lock);
3881
3882 INIT_LIST_HEAD(&hdev->mgmt_pending);
3883 INIT_LIST_HEAD(&hdev->blacklist);
3884 INIT_LIST_HEAD(&hdev->uuids);
3885 INIT_LIST_HEAD(&hdev->link_keys);
3886 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003887 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003888 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003889 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003890 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003891 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03003892 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003893 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003894
3895 INIT_WORK(&hdev->rx_work, hci_rx_work);
3896 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3897 INIT_WORK(&hdev->tx_work, hci_tx_work);
3898 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003899
David Herrmannb1b813d2012-04-22 14:39:58 +02003900 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3901 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3902 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3903
David Herrmannb1b813d2012-04-22 14:39:58 +02003904 skb_queue_head_init(&hdev->rx_q);
3905 skb_queue_head_init(&hdev->cmd_q);
3906 skb_queue_head_init(&hdev->raw_q);
3907
3908 init_waitqueue_head(&hdev->req_wait_q);
3909
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003910 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003911
David Herrmannb1b813d2012-04-22 14:39:58 +02003912 hci_init_sysfs(hdev);
3913 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003914
3915 return hdev;
3916}
3917EXPORT_SYMBOL(hci_alloc_dev);
3918
3919/* Free HCI device */
3920void hci_free_dev(struct hci_dev *hdev)
3921{
David Herrmann9be0dab2012-04-22 14:39:57 +02003922 /* will free via device release */
3923 put_device(&hdev->dev);
3924}
3925EXPORT_SYMBOL(hci_free_dev);
3926
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927/* Register HCI device */
3928int hci_register_dev(struct hci_dev *hdev)
3929{
David Herrmannb1b813d2012-04-22 14:39:58 +02003930 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931
David Herrmann010666a2012-01-07 15:47:07 +01003932 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 return -EINVAL;
3934
Mat Martineau08add512011-11-02 16:18:36 -07003935 /* Do not allow HCI_AMP devices to register at index 0,
3936 * so the index can be used as the AMP controller ID.
3937 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003938 switch (hdev->dev_type) {
3939 case HCI_BREDR:
3940 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3941 break;
3942 case HCI_AMP:
3943 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3944 break;
3945 default:
3946 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003948
Sasha Levin3df92b32012-05-27 22:36:56 +02003949 if (id < 0)
3950 return id;
3951
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 sprintf(hdev->name, "hci%d", id);
3953 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003954
3955 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3956
Kees Cookd8537542013-07-03 15:04:57 -07003957 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3958 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003959 if (!hdev->workqueue) {
3960 error = -ENOMEM;
3961 goto err;
3962 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003963
Kees Cookd8537542013-07-03 15:04:57 -07003964 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3965 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003966 if (!hdev->req_workqueue) {
3967 destroy_workqueue(hdev->workqueue);
3968 error = -ENOMEM;
3969 goto err;
3970 }
3971
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003972 if (!IS_ERR_OR_NULL(bt_debugfs))
3973 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3974
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003975 dev_set_name(&hdev->dev, "%s", hdev->name);
3976
Johan Hedberg99780a72014-02-18 10:40:07 +02003977 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3978 CRYPTO_ALG_ASYNC);
3979 if (IS_ERR(hdev->tfm_aes)) {
3980 BT_ERR("Unable to create crypto context");
3981 error = PTR_ERR(hdev->tfm_aes);
3982 hdev->tfm_aes = NULL;
3983 goto err_wqueue;
3984 }
3985
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003986 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003987 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003988 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003990 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003991 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3992 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003993 if (hdev->rfkill) {
3994 if (rfkill_register(hdev->rfkill) < 0) {
3995 rfkill_destroy(hdev->rfkill);
3996 hdev->rfkill = NULL;
3997 }
3998 }
3999
Johan Hedberg5e130362013-09-13 08:58:17 +03004000 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4001 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4002
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004003 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004004 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004005
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004006 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004007 /* Assume BR/EDR support until proven otherwise (such as
4008 * through reading supported features during init.
4009 */
4010 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4011 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004012
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004013 write_lock(&hci_dev_list_lock);
4014 list_add(&hdev->list, &hci_dev_list);
4015 write_unlock(&hci_dev_list_lock);
4016
Marcel Holtmann4a964402014-07-02 19:10:33 +02004017 /* Devices that are marked for raw-only usage are unconfigured
4018 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004019 */
4020 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004021 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004022
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004024 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025
Johan Hedberg19202572013-01-14 22:33:51 +02004026 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004027
Linus Torvalds1da177e2005-04-16 15:20:36 -07004028 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004029
Johan Hedberg99780a72014-02-18 10:40:07 +02004030err_tfm:
4031 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02004032err_wqueue:
4033 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004034 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004035err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004036 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004037
David Herrmann33ca9542011-10-08 14:58:49 +02004038 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039}
4040EXPORT_SYMBOL(hci_register_dev);
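
/* Illustrative transport driver skeleton (all names hypothetical): the
 * contract checked above is that ->open and ->close are set before
 * registration; ->send is the usual third hook for shipping frames to
 * the hardware. Teardown pairs hci_unregister_dev() with
 * hci_free_dev().
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the frame to its transport here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}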
4041
4042/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004043void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044{
Sasha Levin3df92b32012-05-27 22:36:56 +02004045 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004046
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004047 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048
Johan Hovold94324962012-03-15 14:48:41 +01004049 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4050
Sasha Levin3df92b32012-05-27 22:36:56 +02004051 id = hdev->id;
4052
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004053 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004055 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
4057 hci_dev_do_close(hdev);
4058
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304059 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004060 kfree_skb(hdev->reassembly[i]);
4061
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004062 cancel_work_sync(&hdev->power_on);
4063
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004064 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004065 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4066 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004067 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004068 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004069 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004070 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004071
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004072 /* mgmt_index_removed should take care of emptying the
4073 * pending list */
4074 BUG_ON(!list_empty(&hdev->mgmt_pending));
4075
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 hci_notify(hdev, HCI_DEV_UNREG);
4077
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004078 if (hdev->rfkill) {
4079 rfkill_unregister(hdev->rfkill);
4080 rfkill_destroy(hdev->rfkill);
4081 }
4082
Johan Hedberg99780a72014-02-18 10:40:07 +02004083 if (hdev->tfm_aes)
4084 crypto_free_blkcipher(hdev->tfm_aes);
4085
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004086 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004087
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004088 debugfs_remove_recursive(hdev->debugfs);
4089
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004090 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004091 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004092
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004093 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004094 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004095 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004096 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004097 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004098 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004099 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004100 hci_white_list_clear(hdev);
Johan Hedberg373110c2014-07-02 17:37:25 +03004101 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004102 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004103
David Herrmanndc946bd2012-01-07 15:47:24 +01004104 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004105
4106 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107}
4108EXPORT_SYMBOL(hci_unregister_dev);
4109
4110/* Suspend HCI device */
4111int hci_suspend_dev(struct hci_dev *hdev)
4112{
4113 hci_notify(hdev, HCI_DEV_SUSPEND);
4114 return 0;
4115}
4116EXPORT_SYMBOL(hci_suspend_dev);
4117
4118/* Resume HCI device */
4119int hci_resume_dev(struct hci_dev *hdev)
4120{
4121 hci_notify(hdev, HCI_DEV_RESUME);
4122 return 0;
4123}
4124EXPORT_SYMBOL(hci_resume_dev);
4125
Marcel Holtmann76bca882009-11-18 00:40:39 +01004126/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004127int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004128{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004129	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004130		      !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004131 kfree_skb(skb);
4132 return -ENXIO;
4133 }
4134
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004135 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004136 bt_cb(skb)->incoming = 1;
4137
4138 /* Time stamp */
4139 __net_timestamp(skb);
4140
Marcel Holtmann76bca882009-11-18 00:40:39 +01004141 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004142 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004143
Marcel Holtmann76bca882009-11-18 00:40:39 +01004144 return 0;
4145}
4146EXPORT_SYMBOL(hci_recv_frame);
4147
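/* Reassemble a partially received packet of the given type into
 * hdev->reassembly[index]. Returns the number of input bytes left
 * over once the current packet is complete, or a negative error
 * (-EILSEQ for an invalid type or index, -ENOMEM on allocation
 * failure). Completed frames are passed on via hci_recv_frame().
 */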
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304148static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004149 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304150{
4151 int len = 0;
4152 int hlen = 0;
4153 int remain = count;
4154 struct sk_buff *skb;
4155 struct bt_skb_cb *scb;
4156
4157 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004158 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304159 return -EILSEQ;
4160
4161 skb = hdev->reassembly[index];
4162
4163 if (!skb) {
4164 switch (type) {
4165 case HCI_ACLDATA_PKT:
4166 len = HCI_MAX_FRAME_SIZE;
4167 hlen = HCI_ACL_HDR_SIZE;
4168 break;
4169 case HCI_EVENT_PKT:
4170 len = HCI_MAX_EVENT_SIZE;
4171 hlen = HCI_EVENT_HDR_SIZE;
4172 break;
4173 case HCI_SCODATA_PKT:
4174 len = HCI_MAX_SCO_SIZE;
4175 hlen = HCI_SCO_HDR_SIZE;
4176 break;
4177 }
4178
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004179 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304180 if (!skb)
4181 return -ENOMEM;
4182
4183 scb = (void *) skb->cb;
4184 scb->expect = hlen;
4185 scb->pkt_type = type;
4186
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304187 hdev->reassembly[index] = skb;
4188 }
4189
4190 while (count) {
4191 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004192 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304193
4194 memcpy(skb_put(skb, len), data, len);
4195
4196 count -= len;
4197 data += len;
4198 scb->expect -= len;
4199 remain = count;
4200
4201 switch (type) {
4202 case HCI_EVENT_PKT:
4203 if (skb->len == HCI_EVENT_HDR_SIZE) {
4204 struct hci_event_hdr *h = hci_event_hdr(skb);
4205 scb->expect = h->plen;
4206
4207 if (skb_tailroom(skb) < scb->expect) {
4208 kfree_skb(skb);
4209 hdev->reassembly[index] = NULL;
4210 return -ENOMEM;
4211 }
4212 }
4213 break;
4214
4215 case HCI_ACLDATA_PKT:
4216 if (skb->len == HCI_ACL_HDR_SIZE) {
4217 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4218 scb->expect = __le16_to_cpu(h->dlen);
4219
4220 if (skb_tailroom(skb) < scb->expect) {
4221 kfree_skb(skb);
4222 hdev->reassembly[index] = NULL;
4223 return -ENOMEM;
4224 }
4225 }
4226 break;
4227
4228 case HCI_SCODATA_PKT:
4229 if (skb->len == HCI_SCO_HDR_SIZE) {
4230 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4231 scb->expect = h->dlen;
4232
4233 if (skb_tailroom(skb) < scb->expect) {
4234 kfree_skb(skb);
4235 hdev->reassembly[index] = NULL;
4236 return -ENOMEM;
4237 }
4238 }
4239 break;
4240 }
4241
4242 if (scb->expect == 0) {
4243 /* Complete frame */
4244
4245 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004246 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304247
4248 hdev->reassembly[index] = NULL;
4249 return remain;
4250 }
4251 }
4252
4253 return remain;
4254}
4255
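/* Feed driver data for a single packet type into the reassembly
 * machinery. Each packet type uses its own reassembly slot
 * (type - 1), so interleaved fragments of different types cannot
 * corrupt each other.
 */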
Marcel Holtmannef222012007-07-11 06:42:04 +02004256int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4257{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304258 int rem = 0;
4259
Marcel Holtmannef222012007-07-11 06:42:04 +02004260 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4261 return -EILSEQ;
4262
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004263 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004264 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304265 if (rem < 0)
4266 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004267
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304268 data += (count - rem);
4269 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004270 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004271
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304272 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004273}
4274EXPORT_SYMBOL(hci_recv_fragment);
4275
Suraj Sumangala99811512010-07-14 13:02:19 +05304276#define STREAM_REASSEMBLY 0
4277
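/* Reassemble from a raw byte stream (e.g. a UART based transport)
 * where every packet is prefixed by a one-byte packet type
 * indicator. A single slot (STREAM_REASSEMBLY) suffices since the
 * stream is strictly sequential.
 */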
4278int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4279{
4280 int type;
4281 int rem = 0;
4282
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004283 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304284 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4285
4286 if (!skb) {
4287 struct { char type; } *pkt;
4288
4289 /* Start of the frame */
4290 pkt = data;
4291 type = pkt->type;
4292
4293 data++;
4294 count--;
4295		} else {
4296			type = bt_cb(skb)->pkt_type;
		}
4297
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004298 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004299 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304300 if (rem < 0)
4301 return rem;
4302
4303 data += (count - rem);
4304 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004305 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304306
4307 return rem;
4308}
4309EXPORT_SYMBOL(hci_recv_stream_fragment);
4310
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311/* ---- Interface to upper protocols ---- */
4312
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313int hci_register_cb(struct hci_cb *cb)
4314{
4315 BT_DBG("%p name %s", cb, cb->name);
4316
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004317 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004318 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004319 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004320
4321 return 0;
4322}
4323EXPORT_SYMBOL(hci_register_cb);
4324
4325int hci_unregister_cb(struct hci_cb *cb)
4326{
4327 BT_DBG("%p name %s", cb, cb->name);
4328
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004329 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004331 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332
4333 return 0;
4334}
4335EXPORT_SYMBOL(hci_unregister_cb);
4336
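/* Low-level transmit helper: timestamps the skb, mirrors it to the
 * monitor and, in promiscuous mode, to the HCI sockets, then hands
 * it to the driver's send callback, which is expected to consume it.
 */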
Marcel Holtmann51086992013-10-10 14:54:19 -07004337static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004338{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004339 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004341 /* Time stamp */
4342 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004343
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004344 /* Send copy to monitor */
4345 hci_send_to_monitor(hdev, skb);
4346
4347 if (atomic_read(&hdev->promisc)) {
4348 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004349 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 }
4351
4352 /* Get rid of skb owner, prior to sending to the driver. */
4353 skb_orphan(skb);
4354
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004355 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004356 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357}
4358
Johan Hedberg3119ae92013-03-05 20:37:44 +02004359void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4360{
4361 skb_queue_head_init(&req->cmd_q);
4362 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004363 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004364}
4365
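/* Run a built request: the tail command is tagged with the complete
 * callback and the whole batch is spliced onto hdev->cmd_q in one
 * go. A typical caller looks roughly like this sketch (complete_cb
 * being whatever callback the caller supplies):
 *
 *	struct hci_cp_le_set_scan_enable cp;
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, complete_cb);
 */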
4366int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4367{
4368 struct hci_dev *hdev = req->hdev;
4369 struct sk_buff *skb;
4370 unsigned long flags;
4371
4372 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4373
Andre Guedes5d73e032013-03-08 11:20:16 -03004374	/* If an error occurred during request building, remove all HCI
4375 * commands queued on the HCI request queue.
4376 */
4377 if (req->err) {
4378 skb_queue_purge(&req->cmd_q);
4379 return req->err;
4380 }
4381
Johan Hedberg3119ae92013-03-05 20:37:44 +02004382 /* Do not allow empty requests */
4383 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004384 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004385
4386 skb = skb_peek_tail(&req->cmd_q);
4387 bt_cb(skb)->req.complete = complete;
4388
4389 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4390 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4391 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4392
4393 queue_work(hdev->workqueue, &hdev->cmd_work);
4394
4395 return 0;
4396}
4397
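/* Allocate and fill an skb for a single HCI command: the command
 * header (opcode plus parameter length) followed by plen bytes of
 * parameters. Returns NULL on allocation failure.
 */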
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004398static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004399 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400{
4401 int len = HCI_COMMAND_HDR_SIZE + plen;
4402 struct hci_command_hdr *hdr;
4403 struct sk_buff *skb;
4404
Linus Torvalds1da177e2005-04-16 15:20:36 -07004405 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004406 if (!skb)
4407 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004408
4409 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004410 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411 hdr->plen = plen;
4412
4413 if (plen)
4414 memcpy(skb_put(skb, plen), param, plen);
4415
4416 BT_DBG("skb len %d", skb->len);
4417
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004418 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004419
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004420 return skb;
4421}
4422
4423/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004424int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4425 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004426{
4427 struct sk_buff *skb;
4428
4429 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4430
4431 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4432 if (!skb) {
4433 BT_ERR("%s no memory for command", hdev->name);
4434 return -ENOMEM;
4435 }
4436
Johan Hedberg11714b32013-03-05 20:37:47 +02004437	/* Stand-alone HCI commands must be flagged as
4438 * single-command requests.
4439 */
4440 bt_cb(skb)->req.start = true;
4441
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004443 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444
4445 return 0;
4446}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004447
Johan Hedberg71c76a12013-03-05 20:37:46 +02004448/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004449void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4450 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004451{
4452 struct hci_dev *hdev = req->hdev;
4453 struct sk_buff *skb;
4454
4455 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4456
Andre Guedes34739c12013-03-08 11:20:18 -03004457	/* If an error occurred during request building, there is no point in
4458 * queueing the HCI command. We can simply return.
4459 */
4460 if (req->err)
4461 return;
4462
Johan Hedberg71c76a12013-03-05 20:37:46 +02004463 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4464 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004465 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4466 hdev->name, opcode);
4467 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004468 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004469 }
4470
4471 if (skb_queue_empty(&req->cmd_q))
4472 bt_cb(skb)->req.start = true;
4473
Johan Hedberg02350a72013-04-03 21:50:29 +03004474 bt_cb(skb)->req.event = event;
4475
Johan Hedberg71c76a12013-03-05 20:37:46 +02004476 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004477}
4478
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004479void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4480 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004481{
4482 hci_req_add_ev(req, opcode, plen, param, 0);
4483}
4484
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004486void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487{
4488 struct hci_command_hdr *hdr;
4489
4490 if (!hdev->sent_cmd)
4491 return NULL;
4492
4493 hdr = (void *) hdev->sent_cmd->data;
4494
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004495 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 return NULL;
4497
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004498 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499
4500 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4501}
4502
4503/* Send ACL data */
4504static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4505{
4506 struct hci_acl_hdr *hdr;
4507 int len = skb->len;
4508
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004509 skb_push(skb, HCI_ACL_HDR_SIZE);
4510 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004511 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004512 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4513 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514}
4515
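/* Queue an ACL frame for transmission. A non-fragmented skb is
 * queued as-is; for a fragmented skb the head keeps the caller's
 * flags (typically ACL_START) while every fragment on frag_list is
 * re-flagged as ACL_CONT, and all pieces are queued under the queue
 * lock so the scheduler never sees a partial frame.
 */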
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004516static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004517 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004518{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004519 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 struct hci_dev *hdev = conn->hdev;
4521 struct sk_buff *list;
4522
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004523 skb->len = skb_headlen(skb);
4524 skb->data_len = 0;
4525
4526 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004527
4528 switch (hdev->dev_type) {
4529 case HCI_BREDR:
4530 hci_add_acl_hdr(skb, conn->handle, flags);
4531 break;
4532 case HCI_AMP:
4533 hci_add_acl_hdr(skb, chan->handle, flags);
4534 break;
4535 default:
4536 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4537 return;
4538 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004539
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004540 list = skb_shinfo(skb)->frag_list;
4541 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542		/* Non-fragmented */
4543 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4544
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004545 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 } else {
4547 /* Fragmented */
4548 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4549
4550 skb_shinfo(skb)->frag_list = NULL;
4551
4552 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004553 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004555 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004556
4557 flags &= ~ACL_START;
4558 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004559 do {
4560			skb = list;
			list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004561
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004562 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004563 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004564
4565 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4566
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004567 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568 } while (list);
4569
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004570 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004572}
4573
4574void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4575{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004576 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004577
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004578 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004579
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004580 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004582 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004584
4585/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004586void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004587{
4588 struct hci_dev *hdev = conn->hdev;
4589 struct hci_sco_hdr hdr;
4590
4591 BT_DBG("%s len %d", hdev->name, skb->len);
4592
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004593 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004594 hdr.dlen = skb->len;
4595
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004596 skb_push(skb, HCI_SCO_HDR_SIZE);
4597 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004598 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004599
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004600 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004601
Linus Torvalds1da177e2005-04-16 15:20:36 -07004602 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004603 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004604}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004605
4606/* ---- HCI TX task (outgoing data) ---- */
4607
4608/* HCI Connection scheduler */
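/* Pick the connection of the given link type with the fewest packets
 * in flight and grant it a quote: the controller's free buffer count
 * divided evenly among the connections that currently have data
 * queued (minimum 1).
 */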
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004609static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4610 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611{
4612 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004613 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004614 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004616	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004618
4619 rcu_read_lock();
4620
4621 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004622 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004624
4625 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4626 continue;
4627
Linus Torvalds1da177e2005-04-16 15:20:36 -07004628 num++;
4629
4630 if (c->sent < min) {
4631 min = c->sent;
4632 conn = c;
4633 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004634
4635 if (hci_conn_num(hdev, type) == num)
4636 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004637 }
4638
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004639 rcu_read_unlock();
4640
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004642 int cnt, q;
4643
4644 switch (conn->type) {
4645 case ACL_LINK:
4646 cnt = hdev->acl_cnt;
4647 break;
4648 case SCO_LINK:
4649 case ESCO_LINK:
4650 cnt = hdev->sco_cnt;
4651 break;
4652 case LE_LINK:
4653 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4654 break;
4655 default:
4656 cnt = 0;
4657 BT_ERR("Unknown link type");
4658 }
4659
4660 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004661 *quote = q ? q : 1;
4662	} else {
4663		*quote = 0;
	}
4664
4665 BT_DBG("conn %p quote %d", conn, *quote);
4666 return conn;
4667}
4668
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004669static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670{
4671 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004672 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004673
Ville Tervobae1f5d92011-02-10 22:38:53 -03004674 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004675
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004676 rcu_read_lock();
4677
Linus Torvalds1da177e2005-04-16 15:20:36 -07004678 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004679 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004680 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004681 BT_ERR("%s killing stalled connection %pMR",
4682 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004683 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 }
4685 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004686
4687 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004688}
4689
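/* Channel-aware variant of hci_low_sent(): only channels whose head
 * skb carries the highest queued priority compete, and among those
 * the channel on the connection with the fewest packets in flight
 * wins. An analogous quote is computed from the matching buffer (or,
 * for AMP_LINK, block) count.
 */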
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004690static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4691 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004692{
4693 struct hci_conn_hash *h = &hdev->conn_hash;
4694 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004695 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004696 struct hci_conn *conn;
4697 int cnt, q, conn_num = 0;
4698
4699 BT_DBG("%s", hdev->name);
4700
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004701 rcu_read_lock();
4702
4703 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004704 struct hci_chan *tmp;
4705
4706 if (conn->type != type)
4707 continue;
4708
4709 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4710 continue;
4711
4712 conn_num++;
4713
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004714 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004715 struct sk_buff *skb;
4716
4717 if (skb_queue_empty(&tmp->data_q))
4718 continue;
4719
4720 skb = skb_peek(&tmp->data_q);
4721 if (skb->priority < cur_prio)
4722 continue;
4723
4724 if (skb->priority > cur_prio) {
4725 num = 0;
4726 min = ~0;
4727 cur_prio = skb->priority;
4728 }
4729
4730 num++;
4731
4732 if (conn->sent < min) {
4733 min = conn->sent;
4734 chan = tmp;
4735 }
4736 }
4737
4738 if (hci_conn_num(hdev, type) == conn_num)
4739 break;
4740 }
4741
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004742 rcu_read_unlock();
4743
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004744 if (!chan)
4745 return NULL;
4746
4747 switch (chan->conn->type) {
4748 case ACL_LINK:
4749 cnt = hdev->acl_cnt;
4750 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004751 case AMP_LINK:
4752 cnt = hdev->block_cnt;
4753 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004754 case SCO_LINK:
4755 case ESCO_LINK:
4756 cnt = hdev->sco_cnt;
4757 break;
4758 case LE_LINK:
4759 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4760 break;
4761 default:
4762 cnt = 0;
4763 BT_ERR("Unknown link type");
4764 }
4765
4766 q = cnt / num;
4767 *quote = q ? q : 1;
4768 BT_DBG("chan %p quote %d", chan, *quote);
4769 return chan;
4770}
4771
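/* Anti-starvation pass run after a scheduling round: channels that
 * got to transmit have their sent counter reset, while channels that
 * were left waiting have the priority of their head skb promoted
 * towards HCI_PRIO_MAX - 1 so lower-priority traffic eventually gets
 * its turn.
 */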
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004772static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4773{
4774 struct hci_conn_hash *h = &hdev->conn_hash;
4775 struct hci_conn *conn;
4776 int num = 0;
4777
4778 BT_DBG("%s", hdev->name);
4779
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004780 rcu_read_lock();
4781
4782 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004783 struct hci_chan *chan;
4784
4785 if (conn->type != type)
4786 continue;
4787
4788 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4789 continue;
4790
4791 num++;
4792
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004793 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004794 struct sk_buff *skb;
4795
4796 if (chan->sent) {
4797 chan->sent = 0;
4798 continue;
4799 }
4800
4801 if (skb_queue_empty(&chan->data_q))
4802 continue;
4803
4804 skb = skb_peek(&chan->data_q);
4805 if (skb->priority >= HCI_PRIO_MAX - 1)
4806 continue;
4807
4808 skb->priority = HCI_PRIO_MAX - 1;
4809
4810 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004811 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004812 }
4813
4814 if (hci_conn_num(hdev, type) == num)
4815 break;
4816 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004817
4818 rcu_read_unlock();
4819
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004820}
4821
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004822static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4823{
4824 /* Calculate count of blocks used by this packet */
4825 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4826}
4827
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004828static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004829{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004830 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004831 /* ACL tx timeout must be longer than maximum
4832 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004833 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004834 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004835 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004836 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004837}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004839static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004840{
4841 unsigned int cnt = hdev->acl_cnt;
4842 struct hci_chan *chan;
4843 struct sk_buff *skb;
4844 int quote;
4845
4846 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004847
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004848 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004849 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004850 u32 priority = (skb_peek(&chan->data_q))->priority;
4851 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004852 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004853 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004854
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004855 /* Stop if priority has changed */
4856 if (skb->priority < priority)
4857 break;
4858
4859 skb = skb_dequeue(&chan->data_q);
4860
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004861 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004862 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004863
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004864 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004865 hdev->acl_last_tx = jiffies;
4866
4867 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004868 chan->sent++;
4869 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004870 }
4871 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004872
4873 if (cnt != hdev->acl_cnt)
4874 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004875}
4876
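/* Block-based ACL scheduling (HCI_FLOW_CTL_MODE_BLOCK_BASED):
 * controller credits are counted in data blocks rather than whole
 * packets, so every skb consumes __get_blocks() credits. On an AMP
 * controller the AMP_LINK type is scheduled instead of ACL_LINK.
 */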
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004877static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004878{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004879 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004880 struct hci_chan *chan;
4881 struct sk_buff *skb;
4882 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004883 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004884
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004885 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004886
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004887 BT_DBG("%s", hdev->name);
4888
4889 if (hdev->dev_type == HCI_AMP)
4890 type = AMP_LINK;
4891 else
4892 type = ACL_LINK;
4893
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004894 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004895 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004896 u32 priority = (skb_peek(&chan->data_q))->priority;
4897 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4898 int blocks;
4899
4900 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004901 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004902
4903 /* Stop if priority has changed */
4904 if (skb->priority < priority)
4905 break;
4906
4907 skb = skb_dequeue(&chan->data_q);
4908
4909 blocks = __get_blocks(hdev, skb);
4910 if (blocks > hdev->block_cnt)
4911 return;
4912
4913 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004914 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004915
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004916 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004917 hdev->acl_last_tx = jiffies;
4918
4919 hdev->block_cnt -= blocks;
4920 quote -= blocks;
4921
4922 chan->sent += blocks;
4923 chan->conn->sent += blocks;
4924 }
4925 }
4926
4927 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004928 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004929}
4930
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004931static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004932{
4933 BT_DBG("%s", hdev->name);
4934
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004935 /* No ACL link over BR/EDR controller */
4936 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4937 return;
4938
4939 /* No AMP link over AMP controller */
4940 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004941 return;
4942
4943 switch (hdev->flow_ctl_mode) {
4944 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4945 hci_sched_acl_pkt(hdev);
4946 break;
4947
4948 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4949 hci_sched_acl_blk(hdev);
4950 break;
4951 }
4952}
4953
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004955static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956{
4957 struct hci_conn *conn;
4958 struct sk_buff *skb;
4959 int quote;
4960
4961 BT_DBG("%s", hdev->name);
4962
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004963 if (!hci_conn_num(hdev, SCO_LINK))
4964 return;
4965
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4967 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4968 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004969 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970
4971 conn->sent++;
4972 if (conn->sent == ~0)
4973 conn->sent = 0;
4974 }
4975 }
4976}
4977
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004978static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004979{
4980 struct hci_conn *conn;
4981 struct sk_buff *skb;
4982 int quote;
4983
4984 BT_DBG("%s", hdev->name);
4985
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004986 if (!hci_conn_num(hdev, ESCO_LINK))
4987 return;
4988
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004989 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4990 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004991 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4992 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004993 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004994
4995 conn->sent++;
4996 if (conn->sent == ~0)
4997 conn->sent = 0;
4998 }
4999 }
5000}
5001
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005002static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005003{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005004 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005005 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005006 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005007
5008 BT_DBG("%s", hdev->name);
5009
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005010 if (!hci_conn_num(hdev, LE_LINK))
5011 return;
5012
Marcel Holtmann4a964402014-07-02 19:10:33 +02005013 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005014 /* LE tx timeout must be longer than maximum
5015 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005016 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005017 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005018 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005019 }
5020
5021 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005022 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005023 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005024 u32 priority = (skb_peek(&chan->data_q))->priority;
5025 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005026 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005027 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005028
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005029 /* Stop if priority has changed */
5030 if (skb->priority < priority)
5031 break;
5032
5033 skb = skb_dequeue(&chan->data_q);
5034
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005035 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005036 hdev->le_last_tx = jiffies;
5037
5038 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005039 chan->sent++;
5040 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005041 }
5042 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005043
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005044 if (hdev->le_pkts)
5045 hdev->le_cnt = cnt;
5046 else
5047 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005048
5049 if (cnt != tmp)
5050 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005051}
5052
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005053static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005054{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005055 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005056 struct sk_buff *skb;
5057
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005058 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005059 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005060
Marcel Holtmann52de5992013-09-03 18:08:38 -07005061 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5062 /* Schedule queues and send stuff to HCI driver */
5063 hci_sched_acl(hdev);
5064 hci_sched_sco(hdev);
5065 hci_sched_esco(hdev);
5066 hci_sched_le(hdev);
5067 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005068
Linus Torvalds1da177e2005-04-16 15:20:36 -07005069 /* Send next queued raw (unknown type) packet */
5070 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005071 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005072}
5073
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005074/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005075
5076/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005077static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005078{
5079 struct hci_acl_hdr *hdr = (void *) skb->data;
5080 struct hci_conn *conn;
5081 __u16 handle, flags;
5082
5083 skb_pull(skb, HCI_ACL_HDR_SIZE);
5084
5085 handle = __le16_to_cpu(hdr->handle);
5086 flags = hci_flags(handle);
5087 handle = hci_handle(handle);
5088
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005089 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005090 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091
5092 hdev->stat.acl_rx++;
5093
5094 hci_dev_lock(hdev);
5095 conn = hci_conn_hash_lookup_handle(hdev, handle);
5096 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005097
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005099 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005100
Linus Torvalds1da177e2005-04-16 15:20:36 -07005101 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005102 l2cap_recv_acldata(conn, skb, flags);
5103 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005105 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005106 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005107 }
5108
5109 kfree_skb(skb);
5110}
5111
5112/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005113static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005114{
5115 struct hci_sco_hdr *hdr = (void *) skb->data;
5116 struct hci_conn *conn;
5117 __u16 handle;
5118
5119 skb_pull(skb, HCI_SCO_HDR_SIZE);
5120
5121 handle = __le16_to_cpu(hdr->handle);
5122
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005123 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124
5125 hdev->stat.sco_rx++;
5126
5127 hci_dev_lock(hdev);
5128 conn = hci_conn_hash_lookup_handle(hdev, handle);
5129 hci_dev_unlock(hdev);
5130
5131 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005132 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005133 sco_recv_scodata(conn, skb);
5134 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005136 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005137 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005138 }
5139
5140 kfree_skb(skb);
5141}
5142
Johan Hedberg9238f362013-03-05 20:37:48 +02005143static bool hci_req_is_complete(struct hci_dev *hdev)
5144{
5145 struct sk_buff *skb;
5146
5147 skb = skb_peek(&hdev->cmd_q);
5148 if (!skb)
5149 return true;
5150
5151 return bt_cb(skb)->req.start;
5152}
5153
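/* Requeue a clone of the last sent command so it gets retransmitted.
 * Used to recover from controllers that emit a spontaneous reset
 * complete event while a different command is pending; HCI_OP_RESET
 * itself is deliberately never resent.
 */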
Johan Hedberg42c6b122013-03-05 20:37:49 +02005154static void hci_resend_last(struct hci_dev *hdev)
5155{
5156 struct hci_command_hdr *sent;
5157 struct sk_buff *skb;
5158 u16 opcode;
5159
5160 if (!hdev->sent_cmd)
5161 return;
5162
5163 sent = (void *) hdev->sent_cmd->data;
5164 opcode = __le16_to_cpu(sent->opcode);
5165 if (opcode == HCI_OP_RESET)
5166 return;
5167
5168 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5169 if (!skb)
5170 return;
5171
5172 skb_queue_head(&hdev->cmd_q, skb);
5173 queue_work(hdev->workqueue, &hdev->cmd_work);
5174}
5175
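/* Called on command completion (or status) to decide whether the
 * request the command belonged to has finished. If it has, the
 * request's complete callback is looked up, in hdev->sent_cmd or on
 * the command queue, and fired exactly once; remaining commands of
 * an aborted request are purged from hdev->cmd_q.
 */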
Johan Hedberg9238f362013-03-05 20:37:48 +02005176void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5177{
5178 hci_req_complete_t req_complete = NULL;
5179 struct sk_buff *skb;
5180 unsigned long flags;
5181
5182 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5183
Johan Hedberg42c6b122013-03-05 20:37:49 +02005184 /* If the completed command doesn't match the last one that was
5185	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005186 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005187 if (!hci_sent_cmd_data(hdev, opcode)) {
5188		/* Some CSR-based controllers generate a spontaneous
5189 * reset complete event during init and any pending
5190 * command will never be completed. In such a case we
5191 * need to resend whatever was the last sent
5192 * command.
5193 */
5194 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5195 hci_resend_last(hdev);
5196
Johan Hedberg9238f362013-03-05 20:37:48 +02005197 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005198 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005199
5200 /* If the command succeeded and there's still more commands in
5201 * this request the request is not yet complete.
5202 */
5203 if (!status && !hci_req_is_complete(hdev))
5204 return;
5205
5206	/* If this was the last command in a request, the complete
5207 * callback would be found in hdev->sent_cmd instead of the
5208 * command queue (hdev->cmd_q).
5209 */
5210 if (hdev->sent_cmd) {
5211 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005212
5213 if (req_complete) {
5214 /* We must set the complete callback to NULL to
5215 * avoid calling the callback more than once if
5216 * this function gets called again.
5217 */
5218 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5219
Johan Hedberg9238f362013-03-05 20:37:48 +02005220 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005221 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005222 }
5223
5224 /* Remove all pending commands belonging to this request */
5225 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5226 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5227 if (bt_cb(skb)->req.start) {
5228 __skb_queue_head(&hdev->cmd_q, skb);
5229 break;
5230 }
5231
5232 req_complete = bt_cb(skb)->req.complete;
5233 kfree_skb(skb);
5234 }
5235 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5236
5237call_complete:
5238 if (req_complete)
5239 req_complete(hdev, status);
5240}
5241
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005242static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005243{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005244 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005245 struct sk_buff *skb;
5246
5247 BT_DBG("%s", hdev->name);
5248
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005250 /* Send copy to monitor */
5251 hci_send_to_monitor(hdev, skb);
5252
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253 if (atomic_read(&hdev->promisc)) {
5254 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005255 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005256 }
5257
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005258 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005259 kfree_skb(skb);
5260 continue;
5261 }
5262
5263 if (test_bit(HCI_INIT, &hdev->flags)) {
5264			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005265 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266 case HCI_ACLDATA_PKT:
5267 case HCI_SCODATA_PKT:
5268 kfree_skb(skb);
5269 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005270 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 }
5272
5273 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005274 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005275 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005276 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 hci_event_packet(hdev, skb);
5278 break;
5279
5280 case HCI_ACLDATA_PKT:
5281 BT_DBG("%s ACL data packet", hdev->name);
5282 hci_acldata_packet(hdev, skb);
5283 break;
5284
5285 case HCI_SCODATA_PKT:
5286 BT_DBG("%s SCO data packet", hdev->name);
5287 hci_scodata_packet(hdev, skb);
5288 break;
5289
5290 default:
5291 kfree_skb(skb);
5292 break;
5293 }
5294 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295}
5296
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005297static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005299 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 struct sk_buff *skb;
5301
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005302 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5303 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304
Linus Torvalds1da177e2005-04-16 15:20:36 -07005305 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005306 if (atomic_read(&hdev->cmd_cnt)) {
5307 skb = skb_dequeue(&hdev->cmd_q);
5308 if (!skb)
5309 return;
5310
Wei Yongjun7585b972009-02-25 18:29:52 +08005311 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005313 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005314 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005316 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005317 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005318 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005319 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005320 schedule_delayed_work(&hdev->cmd_timer,
5321 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322 } else {
5323 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005324 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005325 }
5326 }
5327}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005328
5329void hci_req_add_le_scan_disable(struct hci_request *req)
5330{
5331 struct hci_cp_le_set_scan_enable cp;
5332
5333 memset(&cp, 0, sizeof(cp));
5334 cp.enable = LE_SCAN_DISABLE;
5335 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5336}
Andre Guedesa4790db2014-02-26 20:21:47 -03005337
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005338void hci_req_add_le_passive_scan(struct hci_request *req)
5339{
5340 struct hci_cp_le_set_scan_param param_cp;
5341 struct hci_cp_le_set_scan_enable enable_cp;
5342 struct hci_dev *hdev = req->hdev;
5343 u8 own_addr_type;
5344
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005345	/* Set require_privacy to false since no SCAN_REQ are sent
5346 * during passive scanning. Not using an unresolvable address
5347 * here is important so that peer devices using direct
5348 * advertising with our address will be correctly reported
5349 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005350 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005351 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005352 return;
5353
5354 memset(&param_cp, 0, sizeof(param_cp));
5355 param_cp.type = LE_SCAN_PASSIVE;
5356 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5357 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5358 param_cp.own_address_type = own_addr_type;
5359 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5360 &param_cp);
5361
5362 memset(&enable_cp, 0, sizeof(enable_cp));
5363 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005364 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005365 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5366 &enable_cp);
5367}
5368
Andre Guedesa4790db2014-02-26 20:21:47 -03005369static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5370{
5371 if (status)
5372 BT_DBG("HCI request failed to update background scanning: "
5373 "status 0x%2.2x", status);
5374}
5375
5376/* This function controls the background scanning based on hdev->pend_le_conns
5377 * list. If there are pending LE connection we start the background scanning,
5378 * otherwise we stop it.
5379 *
5380 * This function requires the caller holds hdev->lock.
5381 */
5382void hci_update_background_scan(struct hci_dev *hdev)
5383{
Andre Guedesa4790db2014-02-26 20:21:47 -03005384 struct hci_request req;
5385 struct hci_conn *conn;
5386 int err;
5387
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005388 if (!test_bit(HCI_UP, &hdev->flags) ||
5389 test_bit(HCI_INIT, &hdev->flags) ||
5390 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b762014-07-06 12:11:14 +02005391 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005392 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005393 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005394 return;
5395
Andre Guedesa4790db2014-02-26 20:21:47 -03005396 hci_req_init(&req, hdev);
5397
Johan Hedberg66f84552014-07-04 12:37:18 +03005398 if (list_empty(&hdev->pend_le_conns) &&
5399 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005400		/* If there are no pending LE connections or devices
5401 * to be scanned for, we should stop the background
5402 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005403 */
5404
5405 /* If controller is not scanning we are done. */
5406 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5407 return;
5408
5409 hci_req_add_le_scan_disable(&req);
5410
5411 BT_DBG("%s stopping background scanning", hdev->name);
5412 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005413 /* If there is at least one pending LE connection, we should
5414 * keep the background scan running.
5415 */
5416
Andre Guedesa4790db2014-02-26 20:21:47 -03005417 /* If controller is connecting, we should not start scanning
5418 * since some controllers are not able to scan and connect at
5419 * the same time.
5420 */
5421 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5422 if (conn)
5423 return;
5424
Andre Guedes4340a122014-03-10 18:26:24 -03005425 /* If controller is currently scanning, we stop it to ensure we
5426 * don't miss any advertising (due to duplicates filter).
5427 */
5428 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5429 hci_req_add_le_scan_disable(&req);
5430
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005431 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005432
5433 BT_DBG("%s starting background scanning", hdev->name);
5434 }
5435
5436 err = hci_req_run(&req, update_background_scan_complete);
5437 if (err)
5438 BT_ERR("Failed to run HCI request: err %d", err);
5439}