blob: 5c319a49a5a4ca8e510a28fe553b5c59c74c2a15 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
Marcel Holtmannb78752c2010-08-08 23:06:53 -040042static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020043static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020044static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045
Linus Torvalds1da177e2005-04-16 15:20:36 -070046/* HCI device list */
47LIST_HEAD(hci_dev_list);
48DEFINE_RWLOCK(hci_dev_list_lock);
49
50/* HCI callback list */
51LIST_HEAD(hci_cb_list);
52DEFINE_RWLOCK(hci_cb_list_lock);
53
Sasha Levin3df92b32012-05-27 22:36:56 +020054/* HCI ID Numbering */
55static DEFINE_IDA(hci_index_ida);
56
Marcel Holtmann899de762014-07-11 05:51:58 +020057/* ----- HCI requests ----- */
58
59#define HCI_REQ_DONE 0
60#define HCI_REQ_PEND 1
61#define HCI_REQ_CANCELED 2
62
63#define hci_req_lock(d) mutex_lock(&d->req_lock)
64#define hci_req_unlock(d) mutex_unlock(&d->req_lock)
65
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Forward an HCI device event (e.g. registration, up/down) to the
 * HCI socket layer so monitoring sockets can be notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
/* debugfs read for "dut_mode": reports 'Y' or 'N' depending on whether
 * Device Under Test mode is currently enabled for this controller.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write for "dut_mode": accepts a boolean string and toggles
 * Device Under Test mode by issuing the matching HCI command.
 * Enabling sends HCI_OP_ENABLE_DUT_MODE; disabling resets the controller
 * (HCI_OP_RESET), since there is no explicit "disable DUT" command.
 * Requires the device to be up; returns -EALREADY if no change requested.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	/* Leave room for the terminating NUL appended below */
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the response is the HCI status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Command succeeded, so flip the cached flag */
	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
139
/* debugfs "features": dump every supported LMP features page (up to the
 * controller's max_page), plus the LE feature bits if LE is supported.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
/* debugfs "uuids": print every registered service UUID in standard
 * %pUb textual form.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
237
/* debugfs "inquiry_cache": dump every entry of the discovery (inquiry)
 * cache — address, page scan parameters, device class, clock offset,
 * RSSI, SSP mode and entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
273
/* debugfs "link_keys": dump all stored BR/EDR link keys.
 * The key list is traversed under RCU, not the hdev lock.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
299
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700300static int dev_class_show(struct seq_file *f, void *ptr)
301{
302 struct hci_dev *hdev = f->private;
303
304 hci_dev_lock(hdev);
305 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
306 hdev->dev_class[1], hdev->dev_class[0]);
307 hci_dev_unlock(hdev);
308
309 return 0;
310}
311
312static int dev_class_open(struct inode *inode, struct file *file)
313{
314 return single_open(file, dev_class_show, inode->i_private);
315}
316
317static const struct file_operations dev_class_fops = {
318 .open = dev_class_open,
319 .read = seq_read,
320 .llseek = seq_lseek,
321 .release = single_release,
322};
323
/* debugfs "voice_setting" (read-only): report the controller's current
 * voice setting value as 0xNNNN.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
337
/* debugfs "auto_accept_delay": delay before auto-accepting an incoming
 * connection. No range validation is performed on write.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
362
/* debugfs read for "force_sc_support": 'Y' if Secure Connections support
 * is being forced on, 'N' otherwise.
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write for "force_sc_support": toggle forced Secure Connections
 * support. Only allowed while the device is down (-EBUSY otherwise);
 * -EALREADY when the requested state matches the current one.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	/* Leave room for the terminating NUL appended below */
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
408
/* debugfs "sc_only_mode" (read-only): 'Y' if the controller operates in
 * Secure Connections Only mode, 'N' otherwise.
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
426
/* debugfs "idle_timeout": connection idle timeout in milliseconds.
 * 0 disables the timeout; otherwise the value must be within
 * 500 ms .. 3600000 ms (one hour).
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
454
/* debugfs "rpa_timeout": how often (in seconds) the Resolvable Private
 * Address is rotated.
 */
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");
485
/* debugfs "sniff_min_interval": lower bound of the sniff interval.
 * Must be non-zero, even, and not above the current maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
513
/* debugfs "sniff_max_interval": upper bound of the sniff interval.
 * Must be non-zero, even, and not below the current minimum.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
541
/* debugfs "conn_info_min_age": minimum age of cached connection info.
 * Must be non-zero and not above conn_info_max_age.
 */
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");
569
/* debugfs "conn_info_max_age": maximum age of cached connection info.
 * Must be non-zero and not below conn_info_min_age.
 */
static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");
597
/* debugfs "identity": print the controller's identity address and type,
 * followed by the local IRK and the current RPA.
 */
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
627
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800628static int random_address_show(struct seq_file *f, void *p)
629{
630 struct hci_dev *hdev = f->private;
631
632 hci_dev_lock(hdev);
633 seq_printf(f, "%pMR\n", &hdev->random_addr);
634 hci_dev_unlock(hdev);
635
636 return 0;
637}
638
639static int random_address_open(struct inode *inode, struct file *file)
640{
641 return single_open(file, random_address_show, inode->i_private);
642}
643
644static const struct file_operations random_address_fops = {
645 .open = random_address_open,
646 .read = seq_read,
647 .llseek = seq_lseek,
648 .release = single_release,
649};
650
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700651static int static_address_show(struct seq_file *f, void *p)
652{
653 struct hci_dev *hdev = f->private;
654
655 hci_dev_lock(hdev);
656 seq_printf(f, "%pMR\n", &hdev->static_addr);
657 hci_dev_unlock(hdev);
658
659 return 0;
660}
661
662static int static_address_open(struct inode *inode, struct file *file)
663{
664 return single_open(file, static_address_show, inode->i_private);
665}
666
667static const struct file_operations static_address_fops = {
668 .open = static_address_open,
669 .read = seq_read,
670 .llseek = seq_lseek,
671 .release = single_release,
672};
673
/* debugfs read for "force_static_address": 'Y' if the static address is
 * forced as identity address, 'N' otherwise.
 */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs write for "force_static_address": toggle forcing of the static
 * address. Only allowed while the device is down (-EBUSY otherwise);
 * -EALREADY when the requested state matches the current one.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	/* Leave room for the terminating NUL appended below */
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
Marcel Holtmann92202182013-10-18 16:38:10 -0700720
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800721static int white_list_show(struct seq_file *f, void *ptr)
722{
723 struct hci_dev *hdev = f->private;
724 struct bdaddr_list *b;
725
726 hci_dev_lock(hdev);
727 list_for_each_entry(b, &hdev->le_white_list, list)
728 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
729 hci_dev_unlock(hdev);
730
731 return 0;
732}
733
734static int white_list_open(struct inode *inode, struct file *file)
735{
736 return single_open(file, white_list_show, inode->i_private);
737}
738
739static const struct file_operations white_list_fops = {
740 .open = white_list_open,
741 .read = seq_read,
742 .llseek = seq_lseek,
743 .release = single_release,
744};
745
/* debugfs "identity_resolving_keys": dump all stored IRKs with their
 * associated identity address and last known RPA.
 * The IRK list is traversed under RCU, not the hdev lock.
 */
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
774
/* debugfs "long_term_keys": dump all stored LE Long Term Keys with their
 * metadata (address/type, authentication, key type, encryption size,
 * EDIV, Rand and key value). Traversed under RCU, not the hdev lock.
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
802
/* debugfs "conn_min_interval": minimum LE connection interval.
 * Valid range 0x0006..0x0c80 and not above the current maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
830
/* debugfs "conn_max_interval": maximum LE connection interval.
 * Valid range 0x0006..0x0c80 and not below the current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
858
/* debugfs "conn_latency": LE connection slave latency.
 * Valid range 0x0000..0x01f3.
 */
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");
886
/* debugfs "supervision_timeout": LE link supervision timeout.
 * Valid range 0x000a..0x0c80.
 */
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
914
/* debugfs "adv_channel_map": LE advertising channel map bitmask
 * (channels 37/38/39). Valid range 0x01..0x07.
 */
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
942
/* debugfs "adv_min_interval": minimum LE advertising interval.
 * Valid range 0x0020..0x4000 and not above the current maximum.
 */
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");
970
971static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300972{
Georg Lukas729a1052014-07-26 13:59:58 +0200973 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300974
Georg Lukas729a1052014-07-26 13:59:58 +0200975 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -0300976 return -EINVAL;
977
Georg Lukas729a1052014-07-26 13:59:58 +0200978 hci_dev_lock(hdev);
979 hdev->le_adv_max_interval = val;
980 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300981
Georg Lukas729a1052014-07-26 13:59:58 +0200982 return 0;
983}
Andre Guedes7d474e02014-02-26 20:21:54 -0300984
Georg Lukas729a1052014-07-26 13:59:58 +0200985static int adv_max_interval_get(void *data, u64 *val)
986{
987 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300988
Georg Lukas729a1052014-07-26 13:59:58 +0200989 hci_dev_lock(hdev);
990 *val = hdev->le_adv_max_interval;
991 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300992
Georg Lukas729a1052014-07-26 13:59:58 +0200993 return 0;
994}
Andre Guedes7d474e02014-02-26 20:21:54 -0300995
Georg Lukas729a1052014-07-26 13:59:58 +0200996DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
997 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -0300998
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +0200999static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001000{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001001 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001002 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001003 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001004
Andre Guedes7d474e02014-02-26 20:21:54 -03001005 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001006 list_for_each_entry(b, &hdev->whitelist, list)
1007 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001008 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001009 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001010 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001011 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001012 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001013
Andre Guedes7d474e02014-02-26 20:21:54 -03001014 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001015}
1016
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001017static int device_list_open(struct inode *inode, struct file *file)
Andre Guedes7d474e02014-02-26 20:21:54 -03001018{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001019 return single_open(file, device_list_show, inode->i_private);
Andre Guedes7d474e02014-02-26 20:21:54 -03001020}
1021
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001022static const struct file_operations device_list_fops = {
1023 .open = device_list_open,
Andre Guedes7d474e02014-02-26 20:21:54 -03001024 .read = seq_read,
Andre Guedes7d474e02014-02-26 20:21:54 -03001025 .llseek = seq_lseek,
1026 .release = single_release,
1027};
1028
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029/* ---- HCI requests ---- */
1030
Johan Hedberg42c6b122013-03-05 20:37:49 +02001031static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001033 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034
1035 if (hdev->req_status == HCI_REQ_PEND) {
1036 hdev->req_result = result;
1037 hdev->req_status = HCI_REQ_DONE;
1038 wake_up_interruptible(&hdev->req_wait_q);
1039 }
1040}
1041
1042static void hci_req_cancel(struct hci_dev *hdev, int err)
1043{
1044 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1045
1046 if (hdev->req_status == HCI_REQ_PEND) {
1047 hdev->req_result = err;
1048 hdev->req_status = HCI_REQ_CANCELED;
1049 wake_up_interruptible(&hdev->req_wait_q);
1050 }
1051}
1052
Fengguang Wu77a63e02013-04-20 16:24:31 +03001053static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1054 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001055{
1056 struct hci_ev_cmd_complete *ev;
1057 struct hci_event_hdr *hdr;
1058 struct sk_buff *skb;
1059
1060 hci_dev_lock(hdev);
1061
1062 skb = hdev->recv_evt;
1063 hdev->recv_evt = NULL;
1064
1065 hci_dev_unlock(hdev);
1066
1067 if (!skb)
1068 return ERR_PTR(-ENODATA);
1069
1070 if (skb->len < sizeof(*hdr)) {
1071 BT_ERR("Too short HCI event");
1072 goto failed;
1073 }
1074
1075 hdr = (void *) skb->data;
1076 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1077
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001078 if (event) {
1079 if (hdr->evt != event)
1080 goto failed;
1081 return skb;
1082 }
1083
Johan Hedberg75e84b72013-04-02 13:35:04 +03001084 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1085 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1086 goto failed;
1087 }
1088
1089 if (skb->len < sizeof(*ev)) {
1090 BT_ERR("Too short cmd_complete event");
1091 goto failed;
1092 }
1093
1094 ev = (void *) skb->data;
1095 skb_pull(skb, sizeof(*ev));
1096
1097 if (opcode == __le16_to_cpu(ev->opcode))
1098 return skb;
1099
1100 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1101 __le16_to_cpu(ev->opcode));
1102
1103failed:
1104 kfree_skb(skb);
1105 return ERR_PTR(-ENODATA);
1106}
1107
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001108struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03001109 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +03001110{
1111 DECLARE_WAITQUEUE(wait, current);
1112 struct hci_request req;
1113 int err = 0;
1114
1115 BT_DBG("%s", hdev->name);
1116
1117 hci_req_init(&req, hdev);
1118
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001119 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +03001120
1121 hdev->req_status = HCI_REQ_PEND;
1122
Johan Hedberg75e84b72013-04-02 13:35:04 +03001123 add_wait_queue(&hdev->req_wait_q, &wait);
1124 set_current_state(TASK_INTERRUPTIBLE);
1125
Chan-yeol Park039fada2014-10-31 14:23:06 +09001126 err = hci_req_run(&req, hci_req_sync_complete);
1127 if (err < 0) {
1128 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +02001129 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +09001130 return ERR_PTR(err);
1131 }
1132
Johan Hedberg75e84b72013-04-02 13:35:04 +03001133 schedule_timeout(timeout);
1134
1135 remove_wait_queue(&hdev->req_wait_q, &wait);
1136
1137 if (signal_pending(current))
1138 return ERR_PTR(-EINTR);
1139
1140 switch (hdev->req_status) {
1141 case HCI_REQ_DONE:
1142 err = -bt_to_errno(hdev->req_result);
1143 break;
1144
1145 case HCI_REQ_CANCELED:
1146 err = -hdev->req_result;
1147 break;
1148
1149 default:
1150 err = -ETIMEDOUT;
1151 break;
1152 }
1153
1154 hdev->req_status = hdev->req_result = 0;
1155
1156 BT_DBG("%s end: err %d", hdev->name, err);
1157
1158 if (err < 0)
1159 return ERR_PTR(err);
1160
Johan Hedberg7b1abbb2013-04-03 21:54:47 +03001161 return hci_get_cmd_complete(hdev, opcode, event);
1162}
1163EXPORT_SYMBOL(__hci_cmd_sync_ev);
1164
/* Send a single HCI command and wait for its Command Complete event.
 * Convenience wrapper around __hci_cmd_sync_ev() with no special event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1171
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001173static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001174 void (*func)(struct hci_request *req,
1175 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001176 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001178 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001179 DECLARE_WAITQUEUE(wait, current);
1180 int err = 0;
1181
1182 BT_DBG("%s start", hdev->name);
1183
Johan Hedberg42c6b122013-03-05 20:37:49 +02001184 hci_req_init(&req, hdev);
1185
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 hdev->req_status = HCI_REQ_PEND;
1187
Johan Hedberg42c6b122013-03-05 20:37:49 +02001188 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +02001189
Chan-yeol Park039fada2014-10-31 14:23:06 +09001190 add_wait_queue(&hdev->req_wait_q, &wait);
1191 set_current_state(TASK_INTERRUPTIBLE);
1192
Johan Hedberg42c6b122013-03-05 20:37:49 +02001193 err = hci_req_run(&req, hci_req_sync_complete);
1194 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +02001195 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -03001196
Chan-yeol Park039fada2014-10-31 14:23:06 +09001197 remove_wait_queue(&hdev->req_wait_q, &wait);
Johan Hedberg22a3cea2014-11-19 13:16:41 +02001198 set_current_state(TASK_RUNNING);
Chan-yeol Park039fada2014-10-31 14:23:06 +09001199
Andre Guedes920c8302013-03-08 11:20:15 -03001200 /* ENODATA means the HCI request command queue is empty.
1201 * This can happen when a request with conditionals doesn't
1202 * trigger any commands to be sent. This is normal behavior
1203 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +02001204 */
Andre Guedes920c8302013-03-08 11:20:15 -03001205 if (err == -ENODATA)
1206 return 0;
1207
1208 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +02001209 }
1210
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 schedule_timeout(timeout);
1212
1213 remove_wait_queue(&hdev->req_wait_q, &wait);
1214
1215 if (signal_pending(current))
1216 return -EINTR;
1217
1218 switch (hdev->req_status) {
1219 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -07001220 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221 break;
1222
1223 case HCI_REQ_CANCELED:
1224 err = -hdev->req_result;
1225 break;
1226
1227 default:
1228 err = -ETIMEDOUT;
1229 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07001230 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231
Johan Hedberga5040ef2011-01-10 13:28:59 +02001232 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
1234 BT_DBG("%s end: err %d", hdev->name, err);
1235
1236 return err;
1237}
1238
Johan Hedberg01178cd2013-03-05 20:37:41 +02001239static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001240 void (*req)(struct hci_request *req,
1241 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001242 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243{
1244 int ret;
1245
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001246 if (!test_bit(HCI_UP, &hdev->flags))
1247 return -ENETDOWN;
1248
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249 /* Serialize all requests */
1250 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001251 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 hci_req_unlock(hdev);
1253
1254 return ret;
1255}
1256
Johan Hedberg42c6b122013-03-05 20:37:49 +02001257static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001259 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
1261 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262 set_bit(HCI_RESET, &req->hdev->flags);
1263 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264}
1265
Johan Hedberg42c6b122013-03-05 20:37:49 +02001266static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001268 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001269
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001271 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Marcel Holtmann1143e5a2006-09-23 09:57:20 +02001273 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001274 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001275
1276 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001277 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278}
1279
Johan Hedberg42c6b122013-03-05 20:37:49 +02001280static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001281{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001283
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001284 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001285 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001286
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001287 /* Read Local Supported Commands */
1288 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1289
1290 /* Read Local Supported Features */
1291 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1292
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001293 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001295
1296 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001297 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001298
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001299 /* Read Flow Control Mode */
1300 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1301
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001302 /* Read Location Data */
1303 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001304}
1305
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001307{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001309
1310 BT_DBG("%s %ld", hdev->name, opt);
1311
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001312 /* Reset */
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001315
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001316 switch (hdev->dev_type) {
1317 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001318 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001319 break;
1320
1321 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001322 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001323 break;
1324
1325 default:
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1327 break;
1328 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001329}
1330
Johan Hedberg42c6b122013-03-05 20:37:49 +02001331static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001332{
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001333 struct hci_dev *hdev = req->hdev;
1334
Johan Hedberg2177bab2013-03-05 20:37:43 +02001335 __le16 param;
1336 __u8 flt_type;
1337
1338 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001339 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001340
1341 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001342 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001343
1344 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001345 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001346
1347 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001348 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001349
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07001350 /* Read Number of Supported IAC */
1351 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1352
Marcel Holtmann4b836f32013-10-14 14:06:36 -07001353 /* Read Current IAC LAP */
1354 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1355
Johan Hedberg2177bab2013-03-05 20:37:43 +02001356 /* Clear Event Filters */
1357 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001358 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001359
1360 /* Connection accept timeout ~20 secs */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001361 param = cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001362 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001363
Marcel Holtmann4ca048e2013-10-11 16:42:07 -07001364 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1365 * but it does not support page scan related HCI commands.
1366 */
1367 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
Johan Hedbergf332ec62013-03-15 17:07:11 -05001368 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1370 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001371}
1372
Johan Hedberg42c6b122013-03-05 20:37:49 +02001373static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001374{
Johan Hedbergc73eee92013-04-19 18:35:21 +03001375 struct hci_dev *hdev = req->hdev;
1376
Johan Hedberg2177bab2013-03-05 20:37:43 +02001377 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001378 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001379
1380 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001381 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001382
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001383 /* Read LE Supported States */
1384 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1385
Johan Hedberg2177bab2013-03-05 20:37:43 +02001386 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001387 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001388
Marcel Holtmann747d3f02014-02-27 20:37:29 -08001389 /* Clear LE White List */
1390 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +03001391
1392 /* LE-only controllers have LE implicitly enabled */
1393 if (!lmp_bredr_capable(hdev))
1394 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001395}
1396
1397static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1398{
1399 if (lmp_ext_inq_capable(hdev))
1400 return 0x02;
1401
1402 if (lmp_inq_rssi_capable(hdev))
1403 return 0x01;
1404
1405 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1406 hdev->lmp_subver == 0x0757)
1407 return 0x01;
1408
1409 if (hdev->manufacturer == 15) {
1410 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1411 return 0x01;
1412 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1413 return 0x01;
1414 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1415 return 0x01;
1416 }
1417
1418 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1419 hdev->lmp_subver == 0x1805)
1420 return 0x01;
1421
1422 return 0x00;
1423}
1424
Johan Hedberg42c6b122013-03-05 20:37:49 +02001425static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001426{
1427 u8 mode;
1428
Johan Hedberg42c6b122013-03-05 20:37:49 +02001429 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001430
Johan Hedberg42c6b122013-03-05 20:37:49 +02001431 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001432}
1433
Johan Hedberg42c6b122013-03-05 20:37:49 +02001434static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001435{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001436 struct hci_dev *hdev = req->hdev;
1437
Johan Hedberg2177bab2013-03-05 20:37:43 +02001438 /* The second byte is 0xff instead of 0x9f (two reserved bits
1439 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1440 * command otherwise.
1441 */
1442 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1443
1444 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1445 * any event mask for pre 1.2 devices.
1446 */
1447 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1448 return;
1449
1450 if (lmp_bredr_capable(hdev)) {
1451 events[4] |= 0x01; /* Flow Specification Complete */
1452 events[4] |= 0x02; /* Inquiry Result with RSSI */
1453 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1454 events[5] |= 0x08; /* Synchronous Connection Complete */
1455 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001456 } else {
1457 /* Use a different default for LE-only devices */
1458 memset(events, 0, sizeof(events));
1459 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001460 events[1] |= 0x08; /* Read Remote Version Information Complete */
1461 events[1] |= 0x20; /* Command Complete */
1462 events[1] |= 0x40; /* Command Status */
1463 events[1] |= 0x80; /* Hardware Error */
1464 events[2] |= 0x04; /* Number of Completed Packets */
1465 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001466
1467 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1468 events[0] |= 0x80; /* Encryption Change */
1469 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1470 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 }
1472
1473 if (lmp_inq_rssi_capable(hdev))
1474 events[4] |= 0x02; /* Inquiry Result with RSSI */
1475
1476 if (lmp_sniffsubr_capable(hdev))
1477 events[5] |= 0x20; /* Sniff Subrating */
1478
1479 if (lmp_pause_enc_capable(hdev))
1480 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1481
1482 if (lmp_ext_inq_capable(hdev))
1483 events[5] |= 0x40; /* Extended Inquiry Result */
1484
1485 if (lmp_no_flush_capable(hdev))
1486 events[7] |= 0x01; /* Enhanced Flush Complete */
1487
1488 if (lmp_lsto_capable(hdev))
1489 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1490
1491 if (lmp_ssp_capable(hdev)) {
1492 events[6] |= 0x01; /* IO Capability Request */
1493 events[6] |= 0x02; /* IO Capability Response */
1494 events[6] |= 0x04; /* User Confirmation Request */
1495 events[6] |= 0x08; /* User Passkey Request */
1496 events[6] |= 0x10; /* Remote OOB Data Request */
1497 events[6] |= 0x20; /* Simple Pairing Complete */
1498 events[7] |= 0x04; /* User Passkey Notification */
1499 events[7] |= 0x08; /* Keypress Notification */
1500 events[7] |= 0x10; /* Remote Host Supported
1501 * Features Notification
1502 */
1503 }
1504
1505 if (lmp_le_capable(hdev))
1506 events[7] |= 0x20; /* LE Meta-Event */
1507
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509}
1510
Johan Hedberg42c6b122013-03-05 20:37:49 +02001511static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001512{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 struct hci_dev *hdev = req->hdev;
1514
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001517 else
1518 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001519
1520 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001521 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001522
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1525 */
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528
1529 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1535 */
1536 hdev->max_page = 0x01;
1537
Johan Hedberg2177bab2013-03-05 20:37:43 +02001538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001542 } else {
1543 struct hci_cp_write_eir cp;
1544
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549 }
1550 }
1551
1552 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001553 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001554
1555 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001557
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
1560
1561 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001564 }
1565
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570 }
1571}
1572
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
1591
Johan Hedberg42c6b122013-03-05 20:37:49 +02001592static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001593{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595 struct hci_cp_write_le_host_supported cp;
1596
Johan Hedbergc73eee92013-04-19 18:35:21 +03001597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
Johan Hedberg2177bab2013-03-05 20:37:43 +02001601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001605 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
1612
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618 /* If Connectionless Slave Broadcast master role is supported
1619 * enable all necessary events for it.
1620 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001621 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628 /* If Connectionless Slave Broadcast slave role is supported
1629 * enable all necessary events for it.
1630 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001631 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001638 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001639 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001640 events[2] |= 0x80;
1641
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
Johan Hedberg42c6b122013-03-05 20:37:49 +02001645static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001646{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001647 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001648 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001650 hci_setup_event_mask(req);
1651
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001652 /* Some Broadcom based Bluetooth controllers do not support the
1653 * Delete Stored Link Key command. They are clearly indicating its
1654 * absence in the bit mask of supported commands.
1655 *
1656 * Check the supported commands and only if the the command is marked
1657 * as supported send it. If not supported assume that the controller
1658 * does not have actual support for stored link keys which makes this
1659 * command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001660 *
1661 * Some controllers indicate that they support handling deleting
1662 * stored link keys, but they don't. The quirk lets a driver
1663 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001664 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001665 if (hdev->commands[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001667 struct hci_cp_delete_stored_link_key cp;
1668
1669 bacpy(&cp.bdaddr, BDADDR_ANY);
1670 cp.delete_all = 0x01;
1671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1672 sizeof(cp), &cp);
1673 }
1674
Johan Hedberg2177bab2013-03-05 20:37:43 +02001675 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001676 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677
Andre Guedes9193c6e2014-07-01 18:10:09 -03001678 if (lmp_le_capable(hdev)) {
1679 u8 events[8];
1680
1681 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001682 events[0] = 0x0f;
1683
1684 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1685 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001686
1687 /* If controller supports the Connection Parameters Request
1688 * Link Layer Procedure, enable the corresponding event.
1689 */
1690 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1691 events[0] |= 0x20; /* LE Remote Connection
1692 * Parameter Request
1693 */
1694
Andre Guedes9193c6e2014-07-01 18:10:09 -03001695 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1696 events);
1697
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001698 if (hdev->commands[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1701 }
1702
Johan Hedberg42c6b122013-03-05 20:37:49 +02001703 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001704 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001705
1706 /* Read features beyond page 1 if available */
1707 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1708 struct hci_cp_read_local_ext_features cp;
1709
1710 cp.page = p;
1711 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1712 sizeof(cp), &cp);
1713 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001714}
1715
/* Fourth and final stage of controller initialization.
 *
 * Issues optional commands whose availability is indicated either in
 * the supported-commands bitmask (hdev->commands[]) or in the LMP
 * feature bits, so unsupported controllers are never sent them.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured. The
	 * HCI_FORCE_SC debug flag allows turning this on even when the
	 * controller does not advertise SC support.
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1745
/* Run the full controller initialization sequence.
 *
 * Stage 1 applies to all controller types; stages 2-4 are only run for
 * BR/EDR (and dual-mode) controllers, since AMP controllers need just
 * the first stage. On the very first power-on (HCI_SETUP still set)
 * the debugfs entries for the controller are created as well.
 *
 * Returns 0 on success or a negative errno from any failed stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning knobs */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries, plus SMP registration */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1884
/* Minimal init sequence for unconfigured controllers: optional reset
 * plus the commands needed to identify the controller (local version
 * and, when the driver can change it, the public BD address).
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address; only useful when the driver provides a
	 * set_bdaddr callback to program the address later on.
	 */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1902
1903static int __hci_unconf_init(struct hci_dev *hdev)
1904{
1905 int err;
1906
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908 return 0;
1909
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001910 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1911 if (err < 0)
1912 return err;
1913
1914 return 0;
1915}
1916
Johan Hedberg42c6b122013-03-05 20:37:49 +02001917static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918{
1919 __u8 scan = opt;
1920
Johan Hedberg42c6b122013-03-05 20:37:49 +02001921 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
1923 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001924 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925}
1926
Johan Hedberg42c6b122013-03-05 20:37:49 +02001927static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928{
1929 __u8 auth = opt;
1930
Johan Hedberg42c6b122013-03-05 20:37:49 +02001931 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001934 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935}
1936
Johan Hedberg42c6b122013-03-05 20:37:49 +02001937static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938{
1939 __u8 encrypt = opt;
1940
Johan Hedberg42c6b122013-03-05 20:37:49 +02001941 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001943 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001944 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945}
1946
Johan Hedberg42c6b122013-03-05 20:37:49 +02001947static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001948{
1949 __le16 policy = cpu_to_le16(opt);
1950
Johan Hedberg42c6b122013-03-05 20:37:49 +02001951 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001952
1953 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001954 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001955}
1956
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001957/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 * Device is held on return. */
1959struct hci_dev *hci_dev_get(int index)
1960{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001961 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
1963 BT_DBG("%d", index);
1964
1965 if (index < 0)
1966 return NULL;
1967
1968 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001969 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 if (d->id == index) {
1971 hdev = hci_dev_hold(d);
1972 break;
1973 }
1974 }
1975 read_unlock(&hci_dev_list_lock);
1976 return hdev;
1977}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
1979/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001980
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001981bool hci_discovery_active(struct hci_dev *hdev)
1982{
1983 struct discovery_state *discov = &hdev->discovery;
1984
Andre Guedes6fbe1952012-02-03 17:47:58 -03001985 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001986 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001987 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001988 return true;
1989
Andre Guedes6fbe1952012-02-03 17:47:58 -03001990 default:
1991 return false;
1992 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001993}
1994
/* Transition the discovery state machine and notify userspace.
 *
 * The new state must be stored before the switch below runs, since
 * mgmt_discovering() and hci_update_background_scan() observe
 * hdev->discovery.state.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No transition, nothing to do */
	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only send a "discovery stopped" event when discovery
		 * had actually been reported as started before.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2024
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002025void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Johan Hedberg30883512012-01-04 14:16:21 +02002027 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002028 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Johan Hedberg561aafb2012-01-04 13:31:59 +02002030 list_for_each_entry_safe(p, n, &cache->all, all) {
2031 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002032 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002034
2035 INIT_LIST_HEAD(&cache->unknown);
2036 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037}
2038
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002039struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2040 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041{
Johan Hedberg30883512012-01-04 14:16:21 +02002042 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 struct inquiry_entry *e;
2044
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002045 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Johan Hedberg561aafb2012-01-04 13:31:59 +02002047 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002049 return e;
2050 }
2051
2052 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053}
2054
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002056 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057{
Johan Hedberg30883512012-01-04 14:16:21 +02002058 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002059 struct inquiry_entry *e;
2060
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002061 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002062
2063 list_for_each_entry(e, &cache->unknown, list) {
2064 if (!bacmp(&e->data.bdaddr, bdaddr))
2065 return e;
2066 }
2067
2068 return NULL;
2069}
2070
/* Find an entry on the name-resolve list.
 *
 * Passing BDADDR_ANY acts as a wildcard: the first entry whose
 * name_state matches @state is returned. Otherwise the entry with the
 * given address is returned regardless of its state. Returns NULL when
 * nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
2089
/* Re-insert @ie into the resolve list at its correct position.
 *
 * The resolve list is kept ordered so that entries with stronger
 * signal (smaller |rssi|) are resolved first; entries already in
 * NAME_PENDING state are skipped over so an in-flight resolution is
 * not reordered.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with weaker signal */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
2108
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the entry:
 * LEGACY_PAIRING when neither the new data nor a cached entry
 * indicates SSP support, and CONFIRM_NAME when the device name is
 * still unknown (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change re-sorts a pending name resolution so
		 * that stronger devices get their names resolved first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* A newly learned name removes the entry from the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2170
2171static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2172{
Johan Hedberg30883512012-01-04 14:16:21 +02002173 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 struct inquiry_info *info = (struct inquiry_info *) buf;
2175 struct inquiry_entry *e;
2176 int copied = 0;
2177
Johan Hedberg561aafb2012-01-04 13:31:59 +02002178 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002180
2181 if (copied >= num)
2182 break;
2183
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 bacpy(&info->bdaddr, &data->bdaddr);
2185 info->pscan_rep_mode = data->pscan_rep_mode;
2186 info->pscan_period_mode = data->pscan_period_mode;
2187 info->pscan_mode = data->pscan_mode;
2188 memcpy(info->dev_class, data->dev_class, 3);
2189 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002192 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 }
2194
2195 BT_DBG("cache %p, copied %d", cache, copied);
2196 return copied;
2197}
2198
/* Request builder: queue an HCI Inquiry command using the parameters
 * from the user-supplied hci_inquiry_req (smuggled through @opt).
 * Does nothing if an inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
2216
2217int hci_inquiry(void __user *arg)
2218{
2219 __u8 __user *ptr = arg;
2220 struct hci_inquiry_req ir;
2221 struct hci_dev *hdev;
2222 int err = 0, do_inquiry = 0, max_rsp;
2223 long timeo;
2224 __u8 *buf;
2225
2226 if (copy_from_user(&ir, ptr, sizeof(ir)))
2227 return -EFAULT;
2228
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002229 hdev = hci_dev_get(ir.dev_id);
2230 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 return -ENODEV;
2232
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002233 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2234 err = -EBUSY;
2235 goto done;
2236 }
2237
Marcel Holtmann4a964402014-07-02 19:10:33 +02002238 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002239 err = -EOPNOTSUPP;
2240 goto done;
2241 }
2242
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002243 if (hdev->dev_type != HCI_BREDR) {
2244 err = -EOPNOTSUPP;
2245 goto done;
2246 }
2247
Johan Hedberg56f87902013-10-02 13:43:13 +03002248 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2249 err = -EOPNOTSUPP;
2250 goto done;
2251 }
2252
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002253 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002254 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002255 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002256 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 do_inquiry = 1;
2258 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002259 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Marcel Holtmann04837f62006-07-03 10:02:33 +02002261 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002262
2263 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002264 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2265 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002266 if (err < 0)
2267 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002268
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2271 */
NeilBrown74316202014-07-07 15:16:04 +10002272 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002273 TASK_INTERRUPTIBLE))
2274 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002277 /* for unlimited number of responses we will use buffer with
2278 * 255 entries
2279 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2281
2282 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2283 * copy it to the user space.
2284 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002285 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002286 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 err = -ENOMEM;
2288 goto done;
2289 }
2290
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002291 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002293 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 BT_DBG("num_rsp %d", ir.num_rsp);
2296
2297 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2298 ptr += sizeof(ir);
2299 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002300 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002302 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 err = -EFAULT;
2304
2305 kfree(buf);
2306
2307done:
2308 hci_dev_put(hdev);
2309 return err;
2310}
2311
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002312static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 int ret = 0;
2315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 BT_DBG("%s %p", hdev->name, hdev);
2317
2318 hci_req_lock(hdev);
2319
Johan Hovold94324962012-03-15 14:48:41 +01002320 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2321 ret = -ENODEV;
2322 goto done;
2323 }
2324
Marcel Holtmannd603b762014-07-06 12:11:14 +02002325 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2329 */
2330 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2331 ret = -ERFKILL;
2332 goto done;
2333 }
2334
2335 /* Check for valid public address or a configured static
2336 * random adddress, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2338 * or not.
2339 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2342 * available.
2343 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2346 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002347 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002349 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351 ret = -EADDRNOTAVAIL;
2352 goto done;
2353 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002354 }
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 if (test_bit(HCI_UP, &hdev->flags)) {
2357 ret = -EALREADY;
2358 goto done;
2359 }
2360
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 if (hdev->open(hdev)) {
2362 ret = -EIO;
2363 goto done;
2364 }
2365
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002366 atomic_set(&hdev->cmd_cnt, 1);
2367 set_bit(HCI_INIT, &hdev->flags);
2368
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002369 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2370 if (hdev->setup)
2371 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002372
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2375 *
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2378 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002381 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002382
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2386 *
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2390 */
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002393 }
2394
Marcel Holtmann9713c172014-07-06 12:11:15 +02002395 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2399 * on procedure.
2400 */
2401 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2402 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002403 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2404 else
2405 ret = -EADDRNOTAVAIL;
2406 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002407
2408 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002409 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002410 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002411 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 }
2413
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002414 clear_bit(HCI_INIT, &hdev->flags);
2415
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 if (!ret) {
2417 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002418 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 set_bit(HCI_UP, &hdev->flags);
2420 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002421 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002422 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002423 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002424 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002425 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002427 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002428 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002429 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002430 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002432 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002433 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002434 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
2436 skb_queue_purge(&hdev->cmd_q);
2437 skb_queue_purge(&hdev->rx_q);
2438
2439 if (hdev->flush)
2440 hdev->flush(hdev);
2441
2442 if (hdev->sent_cmd) {
2443 kfree_skb(hdev->sent_cmd);
2444 hdev->sent_cmd = NULL;
2445 }
2446
2447 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002448 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 }
2450
2451done:
2452 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 return ret;
2454}
2455
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002456/* ---- HCI ioctl helpers ---- */
2457
/* Power on the controller identified by @dev on behalf of an ioctl or
 * mgmt request. Takes (and releases) a reference on the hci_dev for
 * the duration of the call.
 *
 * Returns 0 on success or a negative errno (-ENODEV if no such device,
 * -EOPNOTSUPP for unconfigured non-user-channel devices, or whatever
 * hci_dev_do_open() reports).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2512
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Walk every LE connection parameter entry: release any
	 * connection still attached to it and remove the entry from
	 * whichever pending-action list it sits on.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			/* Drop the usage count and then the reference
			 * that the params entry itself held.
			 */
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		/* list_del_init so the action list head stays valid
		 * for a possible later re-add.
		 */
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
2529
/* Bring the controller down: stop pending work, flush all queues and
 * connection state, optionally issue an HCI Reset, and finally call the
 * driver's close callback. The statement order here is significant —
 * work items must be cancelled/flushed before the queues they feed are
 * purged. Always returns 0 (an already-down device is not an error).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing to do when the device was not up; still make sure
	 * the command timer cannot fire afterwards.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A running discoverable timeout is cancelled and the
	 * discoverable flags are dropped right away.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	/* rpa_expired is only armed when mgmt is in use */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() accepts NULL, so no guard is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags; only HCI_RAW survives a power cycle */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt about the power off when this was not an
	 * automatic shutdown, and only for BR/EDR controllers.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drop the reference taken while the device was up */
	hci_dev_put(hdev);
	return 0;
}
2637
2638int hci_dev_close(__u16 dev)
2639{
2640 struct hci_dev *hdev;
2641 int err;
2642
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002643 hdev = hci_dev_get(dev);
2644 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002646
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002647 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2648 err = -EBUSY;
2649 goto done;
2650 }
2651
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002652 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2653 cancel_delayed_work(&hdev->power_off);
2654
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002656
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002657done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658 hci_dev_put(hdev);
2659 return err;
2660}
2661
/* ioctl helper: perform a soft reset of a running controller.
 *
 * Drops the RX/command queues and cached state, then issues the HCI
 * Reset command synchronously. Fails with -ENETDOWN if the device is
 * not up, -EBUSY if owned by a user channel, and -EOPNOTSUPP for
 * unconfigured controllers.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restart command processing and zero the flow-control counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2715
2716int hci_dev_reset_stat(__u16 dev)
2717{
2718 struct hci_dev *hdev;
2719 int ret = 0;
2720
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002721 hdev = hci_dev_get(dev);
2722 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 return -ENODEV;
2724
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002725 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2726 ret = -EBUSY;
2727 goto done;
2728 }
2729
Marcel Holtmann4a964402014-07-02 19:10:33 +02002730 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002731 ret = -EOPNOTSUPP;
2732 goto done;
2733 }
2734
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2736
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002737done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 return ret;
2740}
2741
/* Mirror a raw HCISETSCAN change into the CONNECTABLE/DISCOVERABLE
 * dev_flags and, when mgmt is active, notify userspace about the new
 * settings. The test_and_set/clear calls below both update the flag
 * and report whether it actually changed, so they must run before the
 * early HCI_MGMT return.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* Page scan enabled means the device is connectable */
	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	/* Inquiry scan enabled means the device is discoverable;
	 * disabling it also clears the limited-discoverable state.
	 */
	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	/* Without mgmt in use there is nobody to notify */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
2777
/* Dispatcher for the legacy HCISET* ioctls (auth, encryption, scan,
 * link policy/mode, packet type and MTU settings).
 *
 * @cmd: the ioctl number; @arg: userspace pointer to a hci_dev_req.
 * Returns 0 or a negative errno. Only fully configured BR/EDR
 * controllers that are not claimed by a user channel are accepted.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Pure software setting; no command to the controller */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs two 16-bit values: MTU in the upper half,
	 * packet count in the lower half.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2879
/* HCIGETDEVLIST ioctl helper: copy up to dev_num {id, flags} pairs for
 * the registered controllers back to userspace.
 *
 * @arg points to a hci_dev_list_req whose first __u16 holds the number
 * of entries the caller's buffer can take. Returns 0, -EFAULT, -EINVAL
 * (zero or oversized request) or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel buffer stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of entries actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2929
/* HCIGETDEVINFO ioctl helper: fill a hci_dev_info snapshot (address,
 * flags, MTUs, stats, features) for one controller and copy it to
 * userspace. Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type into the low nibble, device type into bits 4-5 */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffers in the ACL
		 * fields; SCO does not exist here.
		 */
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2982
2983/* ---- Interface to HCI drivers ---- */
2984
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002985static int hci_rfkill_set_block(void *data, bool blocked)
2986{
2987 struct hci_dev *hdev = data;
2988
2989 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2990
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002991 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2992 return -EBUSY;
2993
Johan Hedberg5e130362013-09-13 08:58:17 +03002994 if (blocked) {
2995 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002996 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2997 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002998 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002999 } else {
3000 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03003001 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003002
3003 return 0;
3004}
3005
/* rfkill operations for HCI controllers; only block/unblock is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3009
/* Deferred power-on work item (hdev->power_on).
 *
 * Opens the device, re-validates error conditions that were ignored
 * during setup (rfkill, missing address, unconfigured state), arms the
 * auto-off timer when requested, and announces the index to mgmt once
 * the setup or config phase completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3068
/* Deferred power-off work item (hdev->power_off.work); simply performs
 * the full shutdown sequence.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3078
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003079static void hci_discov_off(struct work_struct *work)
3080{
3081 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003082
3083 hdev = container_of(work, struct hci_dev, discov_off.work);
3084
3085 BT_DBG("%s", hdev->name);
3086
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003087 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003088}
3089
Johan Hedberg35f74982014-02-18 17:14:32 +02003090void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003091{
Johan Hedberg48210022013-01-27 00:31:28 +02003092 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003093
Johan Hedberg48210022013-01-27 00:31:28 +02003094 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3095 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003096 kfree(uuid);
3097 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003098}
3099
/* Remove and free all stored BR/EDR link keys. Entries are unlinked
 * with list_del_rcu() and freed via kfree_rcu() so concurrent RCU
 * readers (e.g. hci_find_link_key) stay safe.
 *
 * NOTE(review): presumably called with hdev->lock held to serialize
 * against concurrent list updates — confirm at the call sites.
 */
void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}
3109
Johan Hedberg35f74982014-02-18 17:14:32 +02003110void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003111{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003112 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003113
Johan Hedberg970d0f12014-11-13 14:37:47 +02003114 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3115 list_del_rcu(&k->list);
3116 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003117 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003118}
3119
Johan Hedberg970c4e42014-02-18 10:19:33 +02003120void hci_smp_irks_clear(struct hci_dev *hdev)
3121{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003122 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003123
Johan Hedbergadae20c2014-11-13 14:37:48 +02003124 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3125 list_del_rcu(&k->list);
3126 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003127 }
3128}
3129
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003130struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3131{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003132 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003133
Johan Hedberg0378b592014-11-19 15:22:22 +02003134 rcu_read_lock();
3135 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3136 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3137 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003138 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003139 }
3140 }
3141 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003142
3143 return NULL;
3144}
3145
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303146static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003147 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003148{
3149 /* Legacy key */
3150 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303151 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003152
3153 /* Debug keys are insecure so don't store them persistently */
3154 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303155 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003156
3157 /* Changed combination key and there's no previous one */
3158 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303159 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003160
3161 /* Security mode 3 case */
3162 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303163 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003164
3165 /* Neither local nor remote side had no-bonding as requirement */
3166 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303167 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003168
3169 /* Local side had dedicated bonding as requirement */
3170 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303171 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003172
3173 /* Remote side had dedicated bonding as requirement */
3174 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303175 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003176
3177 /* If none of the above criteria match, then don't store the key
3178 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303179 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003180}
3181
Johan Hedberge804d252014-07-16 11:42:28 +03003182static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003183{
Johan Hedberge804d252014-07-16 11:42:28 +03003184 if (type == SMP_LTK)
3185 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003186
Johan Hedberge804d252014-07-16 11:42:28 +03003187 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003188}
3189
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003190struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003191 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003192{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003193 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003194
Johan Hedberg970d0f12014-11-13 14:37:47 +02003195 rcu_read_lock();
3196 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003197 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003198 continue;
3199
Johan Hedberge804d252014-07-16 11:42:28 +03003200 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003201 continue;
3202
Johan Hedberg970d0f12014-11-13 14:37:47 +02003203 rcu_read_unlock();
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003204 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003205 }
Johan Hedberg970d0f12014-11-13 14:37:47 +02003206 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003207
3208 return NULL;
3209}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003210
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003211struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003212 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003213{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003214 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003215
Johan Hedberg970d0f12014-11-13 14:37:47 +02003216 rcu_read_lock();
3217 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003218 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003219 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberg970d0f12014-11-13 14:37:47 +02003220 ltk_role(k->type) == role) {
3221 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003222 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003223 }
3224 }
3225 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003226
3227 return NULL;
3228}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003229
Johan Hedberg970c4e42014-02-18 10:19:33 +02003230struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3231{
3232 struct smp_irk *irk;
3233
Johan Hedbergadae20c2014-11-13 14:37:48 +02003234 rcu_read_lock();
3235 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3236 if (!bacmp(&irk->rpa, rpa)) {
3237 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003238 return irk;
3239 }
3240 }
3241
Johan Hedbergadae20c2014-11-13 14:37:48 +02003242 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3243 if (smp_irk_matches(hdev, irk->val, rpa)) {
3244 bacpy(&irk->rpa, rpa);
3245 rcu_read_unlock();
3246 return irk;
3247 }
3248 }
3249 rcu_read_unlock();
3250
Johan Hedberg970c4e42014-02-18 10:19:33 +02003251 return NULL;
3252}
3253
3254struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3255 u8 addr_type)
3256{
3257 struct smp_irk *irk;
3258
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003259 /* Identity Address must be public or static random */
3260 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3261 return NULL;
3262
Johan Hedbergadae20c2014-11-13 14:37:48 +02003263 rcu_read_lock();
3264 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003265 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003266 bacmp(bdaddr, &irk->bdaddr) == 0) {
3267 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003268 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003269 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003270 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003271 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003272
3273 return NULL;
3274}
3275
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003276struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003277 bdaddr_t *bdaddr, u8 *val, u8 type,
3278 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003279{
3280 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303281 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003282
3283 old_key = hci_find_link_key(hdev, bdaddr);
3284 if (old_key) {
3285 old_key_type = old_key->type;
3286 key = old_key;
3287 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003288 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003289 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003290 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003291 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003292 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003293 }
3294
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003295 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003296
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003297 /* Some buggy controller combinations generate a changed
3298 * combination key for legacy pairing even when there's no
3299 * previous key */
3300 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003301 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003302 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003303 if (conn)
3304 conn->key_type = type;
3305 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003306
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003307 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003308 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003309 key->pin_len = pin_len;
3310
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003311 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003312 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003313 else
3314 key->type = type;
3315
Johan Hedberg7652ff62014-06-24 13:15:49 +03003316 if (persistent)
3317 *persistent = hci_persistent_key(hdev, conn, type,
3318 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003319
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003320 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003321}
3322
Johan Hedbergca9142b2014-02-19 14:57:44 +02003323struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003324 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003325 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003326{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003327 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003328 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003329
Johan Hedberge804d252014-07-16 11:42:28 +03003330 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003331 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003332 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003333 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003334 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003335 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003336 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003337 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003338 }
3339
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003340 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003341 key->bdaddr_type = addr_type;
3342 memcpy(key->val, tk, sizeof(key->val));
3343 key->authenticated = authenticated;
3344 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003345 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003346 key->enc_size = enc_size;
3347 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003348
Johan Hedbergca9142b2014-02-19 14:57:44 +02003349 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003350}
3351
Johan Hedbergca9142b2014-02-19 14:57:44 +02003352struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3353 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003354{
3355 struct smp_irk *irk;
3356
3357 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3358 if (!irk) {
3359 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3360 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003361 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003362
3363 bacpy(&irk->bdaddr, bdaddr);
3364 irk->addr_type = addr_type;
3365
Johan Hedbergadae20c2014-11-13 14:37:48 +02003366 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003367 }
3368
3369 memcpy(irk->val, val, 16);
3370 bacpy(&irk->rpa, rpa);
3371
Johan Hedbergca9142b2014-02-19 14:57:44 +02003372 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003373}
3374
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003375int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3376{
3377 struct link_key *key;
3378
3379 key = hci_find_link_key(hdev, bdaddr);
3380 if (!key)
3381 return -ENOENT;
3382
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003383 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003384
Johan Hedberg0378b592014-11-19 15:22:22 +02003385 list_del_rcu(&key->list);
3386 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003387
3388 return 0;
3389}
3390
Johan Hedberge0b2b272014-02-18 17:14:31 +02003391int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003392{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003393 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003394 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003395
Johan Hedberg970d0f12014-11-13 14:37:47 +02003396 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003397 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003398 continue;
3399
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003400 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003401
Johan Hedberg970d0f12014-11-13 14:37:47 +02003402 list_del_rcu(&k->list);
3403 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003404 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003405 }
3406
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003407 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003408}
3409
Johan Hedberga7ec7332014-02-18 17:14:35 +02003410void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3411{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003412 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003413
Johan Hedbergadae20c2014-11-13 14:37:48 +02003414 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003415 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3416 continue;
3417
3418 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3419
Johan Hedbergadae20c2014-11-13 14:37:48 +02003420 list_del_rcu(&k->list);
3421 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003422 }
3423}
3424
Ville Tervo6bd32322011-02-16 16:32:41 +02003425/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003426static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003427{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003428 struct hci_dev *hdev = container_of(work, struct hci_dev,
3429 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003430
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003431 if (hdev->sent_cmd) {
3432 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3433 u16 opcode = __le16_to_cpu(sent->opcode);
3434
3435 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3436 } else {
3437 BT_ERR("%s command tx timeout", hdev->name);
3438 }
3439
Ville Tervo6bd32322011-02-16 16:32:41 +02003440 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003441 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003442}
3443
Szymon Janc2763eda2011-03-22 13:12:22 +01003444struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003445 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003446{
3447 struct oob_data *data;
3448
3449 list_for_each_entry(data, &hdev->remote_oob_data, list)
3450 if (bacmp(bdaddr, &data->bdaddr) == 0)
3451 return data;
3452
3453 return NULL;
3454}
3455
3456int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3457{
3458 struct oob_data *data;
3459
3460 data = hci_find_remote_oob_data(hdev, bdaddr);
3461 if (!data)
3462 return -ENOENT;
3463
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003464 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003465
3466 list_del(&data->list);
3467 kfree(data);
3468
3469 return 0;
3470}
3471
Johan Hedberg35f74982014-02-18 17:14:32 +02003472void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003473{
3474 struct oob_data *data, *n;
3475
3476 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3477 list_del(&data->list);
3478 kfree(data);
3479 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003480}
3481
Marcel Holtmann07988722014-01-10 02:07:29 -08003482int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003483 u8 *hash, u8 *rand)
Szymon Janc2763eda2011-03-22 13:12:22 +01003484{
3485 struct oob_data *data;
3486
3487 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003488 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003489 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003490 if (!data)
3491 return -ENOMEM;
3492
3493 bacpy(&data->bdaddr, bdaddr);
3494 list_add(&data->list, &hdev->remote_oob_data);
3495 }
3496
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003497 memcpy(data->hash192, hash, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003498 memcpy(data->rand192, rand, sizeof(data->rand192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003499
Marcel Holtmann07988722014-01-10 02:07:29 -08003500 memset(data->hash256, 0, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003501 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003502
3503 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3504
3505 return 0;
3506}
3507
3508int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003509 u8 *hash192, u8 *rand192,
3510 u8 *hash256, u8 *rand256)
Marcel Holtmann07988722014-01-10 02:07:29 -08003511{
3512 struct oob_data *data;
3513
3514 data = hci_find_remote_oob_data(hdev, bdaddr);
3515 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003516 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003517 if (!data)
3518 return -ENOMEM;
3519
3520 bacpy(&data->bdaddr, bdaddr);
3521 list_add(&data->list, &hdev->remote_oob_data);
3522 }
3523
3524 memcpy(data->hash192, hash192, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003525 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003526
3527 memcpy(data->hash256, hash256, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003528 memcpy(data->rand256, rand256, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003529
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003530 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003531
3532 return 0;
3533}
3534
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003535struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003536 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003537{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003538 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003539
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003540 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003541 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003542 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003543 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544
3545 return NULL;
3546}
3547
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003548void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003549{
3550 struct list_head *p, *n;
3551
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003552 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003553 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003554
3555 list_del(p);
3556 kfree(b);
3557 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003558}
3559
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003560int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003561{
3562 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003563
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003564 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003565 return -EBADF;
3566
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003567 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003568 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003569
Johan Hedberg27f70f32014-07-21 10:50:06 +03003570 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003571 if (!entry)
3572 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003573
3574 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003575 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003578
3579 return 0;
3580}
3581
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003582int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003583{
3584 struct bdaddr_list *entry;
3585
Johan Hedberg35f74982014-02-18 17:14:32 +02003586 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003587 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003588 return 0;
3589 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003590
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003591 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003592 if (!entry)
3593 return -ENOENT;
3594
3595 list_del(&entry->list);
3596 kfree(entry);
3597
3598 return 0;
3599}
3600
Andre Guedes15819a72014-02-03 13:56:18 -03003601/* This function requires the caller holds hdev->lock */
3602struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3603 bdaddr_t *addr, u8 addr_type)
3604{
3605 struct hci_conn_params *params;
3606
Johan Hedberg738f6182014-07-03 19:33:51 +03003607 /* The conn params list only contains identity addresses */
3608 if (!hci_is_identity_address(addr, addr_type))
3609 return NULL;
3610
Andre Guedes15819a72014-02-03 13:56:18 -03003611 list_for_each_entry(params, &hdev->le_conn_params, list) {
3612 if (bacmp(&params->addr, addr) == 0 &&
3613 params->addr_type == addr_type) {
3614 return params;
3615 }
3616 }
3617
3618 return NULL;
3619}
3620
Andre Guedescef952c2014-02-26 20:21:49 -03003621static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3622{
3623 struct hci_conn *conn;
3624
3625 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3626 if (!conn)
3627 return false;
3628
3629 if (conn->dst_type != type)
3630 return false;
3631
3632 if (conn->state != BT_CONNECTED)
3633 return false;
3634
3635 return true;
3636}
3637
Andre Guedes15819a72014-02-03 13:56:18 -03003638/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003639struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3640 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003641{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003642 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003643
Johan Hedberg738f6182014-07-03 19:33:51 +03003644 /* The list only contains identity addresses */
3645 if (!hci_is_identity_address(addr, addr_type))
3646 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003647
Johan Hedberg501f8822014-07-04 12:37:26 +03003648 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003649 if (bacmp(&param->addr, addr) == 0 &&
3650 param->addr_type == addr_type)
3651 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003652 }
3653
3654 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003655}
3656
3657/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003658struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3659 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003660{
3661 struct hci_conn_params *params;
3662
Johan Hedbergc46245b2014-07-02 17:37:33 +03003663 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003664 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003665
Andre Guedes15819a72014-02-03 13:56:18 -03003666 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003667 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003668 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003669
3670 params = kzalloc(sizeof(*params), GFP_KERNEL);
3671 if (!params) {
3672 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003673 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003674 }
3675
3676 bacpy(&params->addr, addr);
3677 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003678
3679 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003680 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003681
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003682 params->conn_min_interval = hdev->le_conn_min_interval;
3683 params->conn_max_interval = hdev->le_conn_max_interval;
3684 params->conn_latency = hdev->le_conn_latency;
3685 params->supervision_timeout = hdev->le_supv_timeout;
3686 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3687
3688 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3689
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003690 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003691}
3692
3693/* This function requires the caller holds hdev->lock */
3694int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003695 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003696{
3697 struct hci_conn_params *params;
3698
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003699 params = hci_conn_params_add(hdev, addr, addr_type);
3700 if (!params)
3701 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003702
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003703 if (params->auto_connect == auto_connect)
3704 return 0;
3705
Johan Hedberg95305ba2014-07-04 12:37:21 +03003706 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003707
Andre Guedescef952c2014-02-26 20:21:49 -03003708 switch (auto_connect) {
3709 case HCI_AUTO_CONN_DISABLED:
3710 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003711 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003712 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003713 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003714 list_add(&params->action, &hdev->pend_le_reports);
3715 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003716 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003717 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003718 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003719 if (!is_connected(hdev, addr, addr_type)) {
3720 list_add(&params->action, &hdev->pend_le_conns);
3721 hci_update_background_scan(hdev);
3722 }
Andre Guedescef952c2014-02-26 20:21:49 -03003723 break;
3724 }
Andre Guedes15819a72014-02-03 13:56:18 -03003725
Johan Hedberg851efca2014-07-02 22:42:00 +03003726 params->auto_connect = auto_connect;
3727
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003728 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3729 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003730
3731 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003732}
3733
Johan Hedbergf6c63242014-08-15 21:06:59 +03003734static void hci_conn_params_free(struct hci_conn_params *params)
3735{
3736 if (params->conn) {
3737 hci_conn_drop(params->conn);
3738 hci_conn_put(params->conn);
3739 }
3740
3741 list_del(&params->action);
3742 list_del(&params->list);
3743 kfree(params);
3744}
3745
Andre Guedes15819a72014-02-03 13:56:18 -03003746/* This function requires the caller holds hdev->lock */
3747void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3748{
3749 struct hci_conn_params *params;
3750
3751 params = hci_conn_params_lookup(hdev, addr, addr_type);
3752 if (!params)
3753 return;
3754
Johan Hedbergf6c63242014-08-15 21:06:59 +03003755 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003756
Johan Hedberg95305ba2014-07-04 12:37:21 +03003757 hci_update_background_scan(hdev);
3758
Andre Guedes15819a72014-02-03 13:56:18 -03003759 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3760}
3761
3762/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003763void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003764{
3765 struct hci_conn_params *params, *tmp;
3766
3767 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003768 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3769 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003770 list_del(&params->list);
3771 kfree(params);
3772 }
3773
Johan Hedberg55af49a2014-07-02 17:37:26 +03003774 BT_DBG("All LE disabled connection parameters were removed");
3775}
3776
/* Remove every stored LE connection parameter entry and re-evaluate
 * the background scan afterwards.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}
3789
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003790static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003791{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003792 if (status) {
3793 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003794
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003795 hci_dev_lock(hdev);
3796 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3797 hci_dev_unlock(hdev);
3798 return;
3799 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003800}
3801
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003802static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003803{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003804 /* General inquiry access code (GIAC) */
3805 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3806 struct hci_request req;
3807 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003808 int err;
3809
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003810 if (status) {
3811 BT_ERR("Failed to disable LE scanning: status %d", status);
3812 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003813 }
3814
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003815 switch (hdev->discovery.type) {
3816 case DISCOV_TYPE_LE:
3817 hci_dev_lock(hdev);
3818 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3819 hci_dev_unlock(hdev);
3820 break;
3821
3822 case DISCOV_TYPE_INTERLEAVED:
3823 hci_req_init(&req, hdev);
3824
3825 memset(&cp, 0, sizeof(cp));
3826 memcpy(&cp.lap, lap, sizeof(cp.lap));
3827 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3828 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3829
3830 hci_dev_lock(hdev);
3831
3832 hci_inquiry_cache_flush(hdev);
3833
3834 err = hci_req_run(&req, inquiry_complete);
3835 if (err) {
3836 BT_ERR("Inquiry request failed: err %d", err);
3837 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838 }
3839
3840 hci_dev_unlock(hdev);
3841 break;
3842 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003843}
3844
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003845static void le_scan_disable_work(struct work_struct *work)
3846{
3847 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003848 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003849 struct hci_request req;
3850 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003851
3852 BT_DBG("%s", hdev->name);
3853
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003854 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003855
Andre Guedesb1efcc22014-02-26 20:21:40 -03003856 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003857
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003858 err = hci_req_run(&req, le_scan_disable_work_complete);
3859 if (err)
3860 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003861}
3862
Johan Hedberg8d972502014-02-28 12:54:14 +02003863static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3864{
3865 struct hci_dev *hdev = req->hdev;
3866
3867 /* If we're advertising or initiating an LE connection we can't
3868 * go ahead and change the random address at this time. This is
3869 * because the eventual initiator address used for the
3870 * subsequently created connection will be undefined (some
3871 * controllers use the new address and others the one we had
3872 * when the operation started).
3873 *
3874 * In this kind of scenario skip the update and let the random
3875 * address be updated at the next cycle.
3876 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003877 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003878 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3879 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003880 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003881 return;
3882 }
3883
3884 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3885}
3886
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003887int hci_update_random_address(struct hci_request *req, bool require_privacy,
3888 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003889{
3890 struct hci_dev *hdev = req->hdev;
3891 int err;
3892
3893 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003894 * current RPA has expired or there is something else than
3895 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003896 */
3897 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003898 int to;
3899
3900 *own_addr_type = ADDR_LE_DEV_RANDOM;
3901
3902 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003903 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003904 return 0;
3905
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003906 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003907 if (err < 0) {
3908 BT_ERR("%s failed to generate new RPA", hdev->name);
3909 return err;
3910 }
3911
Johan Hedberg8d972502014-02-28 12:54:14 +02003912 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003913
3914 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3915 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3916
3917 return 0;
3918 }
3919
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003920 /* In case of required privacy without resolvable private address,
3921 * use an unresolvable private address. This is useful for active
3922 * scanning and non-connectable advertising.
3923 */
3924 if (require_privacy) {
3925 bdaddr_t urpa;
3926
3927 get_random_bytes(&urpa, 6);
3928 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3929
3930 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003931 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003932 return 0;
3933 }
3934
Johan Hedbergebd3a742014-02-23 19:42:21 +02003935 /* If forcing static address is in use or there is no public
3936 * address use the static address as random address (but skip
3937 * the HCI command if the current random address is already the
3938 * static one.
3939 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003940 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003941 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3942 *own_addr_type = ADDR_LE_DEV_RANDOM;
3943 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3944 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3945 &hdev->static_addr);
3946 return 0;
3947 }
3948
3949 /* Neither privacy nor static address is being used so use a
3950 * public address.
3951 */
3952 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3953
3954 return 0;
3955}
3956
Johan Hedberga1f4c312014-02-27 14:05:41 +02003957/* Copy the Identity Address of the controller.
3958 *
3959 * If the controller has a public BD_ADDR, then by default use that one.
3960 * If this is a LE only controller without a public address, default to
3961 * the static random address.
3962 *
3963 * For debugging purposes it is possible to force controllers with a
3964 * public address to use the static random address instead.
3965 */
3966void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3967 u8 *bdaddr_type)
3968{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003969 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003970 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3971 bacpy(bdaddr, &hdev->static_addr);
3972 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3973 } else {
3974 bacpy(bdaddr, &hdev->bdaddr);
3975 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3976 }
3977}
3978
David Herrmann9be0dab2012-04-22 14:39:57 +02003979/* Alloc HCI device */
3980struct hci_dev *hci_alloc_dev(void)
3981{
3982 struct hci_dev *hdev;
3983
Johan Hedberg27f70f32014-07-21 10:50:06 +03003984 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003985 if (!hdev)
3986 return NULL;
3987
David Herrmannb1b813d2012-04-22 14:39:58 +02003988 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3989 hdev->esco_type = (ESCO_HV1);
3990 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003991 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3992 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003993 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003994 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3995 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003996
David Herrmannb1b813d2012-04-22 14:39:58 +02003997 hdev->sniff_max_interval = 800;
3998 hdev->sniff_min_interval = 80;
3999
Marcel Holtmann3f959d42014-02-20 11:55:56 -08004000 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02004001 hdev->le_adv_min_interval = 0x0800;
4002 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004003 hdev->le_scan_interval = 0x0060;
4004 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07004005 hdev->le_conn_min_interval = 0x0028;
4006 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02004007 hdev->le_conn_latency = 0x0000;
4008 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07004009
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004010 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01004011 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02004012 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4013 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004014
David Herrmannb1b813d2012-04-22 14:39:58 +02004015 mutex_init(&hdev->lock);
4016 mutex_init(&hdev->req_lock);
4017
4018 INIT_LIST_HEAD(&hdev->mgmt_pending);
4019 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004020 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004021 INIT_LIST_HEAD(&hdev->uuids);
4022 INIT_LIST_HEAD(&hdev->link_keys);
4023 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004024 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004025 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004026 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004027 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004028 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004029 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004030 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004031
4032 INIT_WORK(&hdev->rx_work, hci_rx_work);
4033 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4034 INIT_WORK(&hdev->tx_work, hci_tx_work);
4035 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004036
David Herrmannb1b813d2012-04-22 14:39:58 +02004037 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4038 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4039 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4040
David Herrmannb1b813d2012-04-22 14:39:58 +02004041 skb_queue_head_init(&hdev->rx_q);
4042 skb_queue_head_init(&hdev->cmd_q);
4043 skb_queue_head_init(&hdev->raw_q);
4044
4045 init_waitqueue_head(&hdev->req_wait_q);
4046
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004047 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004048
David Herrmannb1b813d2012-04-22 14:39:58 +02004049 hci_init_sysfs(hdev);
4050 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004051
4052 return hdev;
4053}
4054EXPORT_SYMBOL(hci_alloc_dev);
4055
/* Free HCI device.
 *
 * Drops the device reference; the memory is actually released by the
 * device release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4063
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064/* Register HCI device */
4065int hci_register_dev(struct hci_dev *hdev)
4066{
David Herrmannb1b813d2012-04-22 14:39:58 +02004067 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068
Marcel Holtmann74292d52014-07-06 15:50:27 +02004069 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 return -EINVAL;
4071
Mat Martineau08add512011-11-02 16:18:36 -07004072 /* Do not allow HCI_AMP devices to register at index 0,
4073 * so the index can be used as the AMP controller ID.
4074 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004075 switch (hdev->dev_type) {
4076 case HCI_BREDR:
4077 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4078 break;
4079 case HCI_AMP:
4080 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4081 break;
4082 default:
4083 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004085
Sasha Levin3df92b32012-05-27 22:36:56 +02004086 if (id < 0)
4087 return id;
4088
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 sprintf(hdev->name, "hci%d", id);
4090 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004091
4092 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4093
Kees Cookd8537542013-07-03 15:04:57 -07004094 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4095 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004096 if (!hdev->workqueue) {
4097 error = -ENOMEM;
4098 goto err;
4099 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004100
Kees Cookd8537542013-07-03 15:04:57 -07004101 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4102 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004103 if (!hdev->req_workqueue) {
4104 destroy_workqueue(hdev->workqueue);
4105 error = -ENOMEM;
4106 goto err;
4107 }
4108
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004109 if (!IS_ERR_OR_NULL(bt_debugfs))
4110 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4111
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004112 dev_set_name(&hdev->dev, "%s", hdev->name);
4113
4114 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004115 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004116 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004118 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004119 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4120 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004121 if (hdev->rfkill) {
4122 if (rfkill_register(hdev->rfkill) < 0) {
4123 rfkill_destroy(hdev->rfkill);
4124 hdev->rfkill = NULL;
4125 }
4126 }
4127
Johan Hedberg5e130362013-09-13 08:58:17 +03004128 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4129 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4130
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004131 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004132 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004133
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004134 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004135 /* Assume BR/EDR support until proven otherwise (such as
4136 * through reading supported features during init.
4137 */
4138 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4139 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004140
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004141 write_lock(&hci_dev_list_lock);
4142 list_add(&hdev->list, &hci_dev_list);
4143 write_unlock(&hci_dev_list_lock);
4144
Marcel Holtmann4a964402014-07-02 19:10:33 +02004145 /* Devices that are marked for raw-only usage are unconfigured
4146 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004147 */
4148 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004149 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004150
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004152 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004153
Johan Hedberg19202572013-01-14 22:33:51 +02004154 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004155
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004157
David Herrmann33ca9542011-10-08 14:58:49 +02004158err_wqueue:
4159 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004160 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004161err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004162 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004163
David Herrmann33ca9542011-10-08 14:58:49 +02004164 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004165}
4166EXPORT_SYMBOL(hci_register_dev);
4167
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up, in reverse:
 * unlink from the global list, close the device, cancel pending work,
 * notify mgmt, release rfkill/sysfs/debugfs/workqueues, clear all
 * stored per-device data, and finally return the index to the IDA.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce the removal to mgmt if the device finished
	 * its setup/config phase and is not mid-init.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Clear all stored per-device data under hdev->lock. */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4235
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; no device
 * state is changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4243
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event; no device
 * state is changed here. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4251
Marcel Holtmann75e05692014-11-02 08:15:38 +01004252/* Reset HCI device */
4253int hci_reset_dev(struct hci_dev *hdev)
4254{
4255 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4256 struct sk_buff *skb;
4257
4258 skb = bt_skb_alloc(3, GFP_ATOMIC);
4259 if (!skb)
4260 return -ENOMEM;
4261
4262 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4263 memcpy(skb_put(skb, 3), hw_err, 3);
4264
4265 /* Send Hardware Error to upper stack */
4266 return hci_recv_frame(hdev, skb);
4267}
4268EXPORT_SYMBOL(hci_reset_dev);
4269
Marcel Holtmann76bca882009-11-18 00:40:39 +01004270/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004271int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004272{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004273 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004274 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004275 kfree_skb(skb);
4276 return -ENXIO;
4277 }
4278
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004279 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004280 bt_cb(skb)->incoming = 1;
4281
4282 /* Time stamp */
4283 __net_timestamp(skb);
4284
Marcel Holtmann76bca882009-11-18 00:40:39 +01004285 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004286 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004287
Marcel Holtmann76bca882009-11-18 00:40:39 +01004288 return 0;
4289}
4290EXPORT_SYMBOL(hci_recv_frame);
4291
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304292static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004293 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304294{
4295 int len = 0;
4296 int hlen = 0;
4297 int remain = count;
4298 struct sk_buff *skb;
4299 struct bt_skb_cb *scb;
4300
4301 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004302 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304303 return -EILSEQ;
4304
4305 skb = hdev->reassembly[index];
4306
4307 if (!skb) {
4308 switch (type) {
4309 case HCI_ACLDATA_PKT:
4310 len = HCI_MAX_FRAME_SIZE;
4311 hlen = HCI_ACL_HDR_SIZE;
4312 break;
4313 case HCI_EVENT_PKT:
4314 len = HCI_MAX_EVENT_SIZE;
4315 hlen = HCI_EVENT_HDR_SIZE;
4316 break;
4317 case HCI_SCODATA_PKT:
4318 len = HCI_MAX_SCO_SIZE;
4319 hlen = HCI_SCO_HDR_SIZE;
4320 break;
4321 }
4322
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004323 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304324 if (!skb)
4325 return -ENOMEM;
4326
4327 scb = (void *) skb->cb;
4328 scb->expect = hlen;
4329 scb->pkt_type = type;
4330
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304331 hdev->reassembly[index] = skb;
4332 }
4333
4334 while (count) {
4335 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004336 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304337
4338 memcpy(skb_put(skb, len), data, len);
4339
4340 count -= len;
4341 data += len;
4342 scb->expect -= len;
4343 remain = count;
4344
4345 switch (type) {
4346 case HCI_EVENT_PKT:
4347 if (skb->len == HCI_EVENT_HDR_SIZE) {
4348 struct hci_event_hdr *h = hci_event_hdr(skb);
4349 scb->expect = h->plen;
4350
4351 if (skb_tailroom(skb) < scb->expect) {
4352 kfree_skb(skb);
4353 hdev->reassembly[index] = NULL;
4354 return -ENOMEM;
4355 }
4356 }
4357 break;
4358
4359 case HCI_ACLDATA_PKT:
4360 if (skb->len == HCI_ACL_HDR_SIZE) {
4361 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4362 scb->expect = __le16_to_cpu(h->dlen);
4363
4364 if (skb_tailroom(skb) < scb->expect) {
4365 kfree_skb(skb);
4366 hdev->reassembly[index] = NULL;
4367 return -ENOMEM;
4368 }
4369 }
4370 break;
4371
4372 case HCI_SCODATA_PKT:
4373 if (skb->len == HCI_SCO_HDR_SIZE) {
4374 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4375 scb->expect = h->dlen;
4376
4377 if (skb_tailroom(skb) < scb->expect) {
4378 kfree_skb(skb);
4379 hdev->reassembly[index] = NULL;
4380 return -ENOMEM;
4381 }
4382 }
4383 break;
4384 }
4385
4386 if (scb->expect == 0) {
4387 /* Complete frame */
4388
4389 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004390 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304391
4392 hdev->reassembly[index] = NULL;
4393 return remain;
4394 }
4395 }
4396
4397 return remain;
4398}
4399
Suraj Sumangala99811512010-07-14 13:02:19 +05304400#define STREAM_REASSEMBLY 0
4401
4402int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4403{
4404 int type;
4405 int rem = 0;
4406
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004407 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304408 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4409
4410 if (!skb) {
4411 struct { char type; } *pkt;
4412
4413 /* Start of the frame */
4414 pkt = data;
4415 type = pkt->type;
4416
4417 data++;
4418 count--;
4419 } else
4420 type = bt_cb(skb)->pkt_type;
4421
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004422 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004423 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304424 if (rem < 0)
4425 return rem;
4426
4427 data += (count - rem);
4428 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004429 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304430
4431 return rem;
4432}
4433EXPORT_SYMBOL(hci_recv_stream_fragment);
4434
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435/* ---- Interface to upper protocols ---- */
4436
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437int hci_register_cb(struct hci_cb *cb)
4438{
4439 BT_DBG("%p name %s", cb, cb->name);
4440
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004441 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004443 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444
4445 return 0;
4446}
4447EXPORT_SYMBOL(hci_register_cb);
4448
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4460
Marcel Holtmann51086992013-10-10 14:54:19 -07004461static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004463 int err;
4464
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004465 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004466
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004467 /* Time stamp */
4468 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004469
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004470 /* Send copy to monitor */
4471 hci_send_to_monitor(hdev, skb);
4472
4473 if (atomic_read(&hdev->promisc)) {
4474 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004475 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 }
4477
4478 /* Get rid of skb owner, prior to sending to the driver. */
4479 skb_orphan(skb);
4480
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004481 err = hdev->send(hdev, skb);
4482 if (err < 0) {
4483 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4484 kfree_skb(skb);
4485 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004486}
4487
Johan Hedberg3119ae92013-03-05 20:37:44 +02004488void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4489{
4490 skb_queue_head_init(&req->cmd_q);
4491 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004492 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004493}
4494
/* Submit a built HCI request for execution.
 *
 * Attaches @complete to the last queued command, splices the request's
 * command queue onto the device command queue and kicks the command
 * work. Returns 0 on success, the recorded build error (req->err) if
 * request building failed, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback is carried by the last command of
	 * the request.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4526
Marcel Holtmann899de762014-07-11 05:51:58 +02004527bool hci_req_pending(struct hci_dev *hdev)
4528{
4529 return (hdev->req_status == HCI_REQ_PEND);
4530}
4531
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004532static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004533 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534{
4535 int len = HCI_COMMAND_HDR_SIZE + plen;
4536 struct hci_command_hdr *hdr;
4537 struct sk_buff *skb;
4538
Linus Torvalds1da177e2005-04-16 15:20:36 -07004539 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004540 if (!skb)
4541 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004542
4543 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004544 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 hdr->plen = plen;
4546
4547 if (plen)
4548 memcpy(skb_put(skb, plen), param, plen);
4549
4550 BT_DBG("skb len %d", skb->len);
4551
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004552 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004553 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004554
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004555 return skb;
4556}
4557
4558/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004559int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4560 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004561{
4562 struct sk_buff *skb;
4563
4564 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4565
4566 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4567 if (!skb) {
4568 BT_ERR("%s no memory for command", hdev->name);
4569 return -ENOMEM;
4570 }
4571
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004572 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004573 * single-command requests.
4574 */
4575 bt_cb(skb)->req.start = true;
4576
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004578 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004579
4580 return 0;
4581}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004582
Johan Hedberg71c76a12013-03-05 20:37:46 +02004583/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004584void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4585 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004586{
4587 struct hci_dev *hdev = req->hdev;
4588 struct sk_buff *skb;
4589
4590 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4591
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004592 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004593 * queueing the HCI command. We can simply return.
4594 */
4595 if (req->err)
4596 return;
4597
Johan Hedberg71c76a12013-03-05 20:37:46 +02004598 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4599 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004600 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4601 hdev->name, opcode);
4602 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004603 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004604 }
4605
4606 if (skb_queue_empty(&req->cmd_q))
4607 bt_cb(skb)->req.start = true;
4608
Johan Hedberg02350a72013-04-03 21:50:29 +03004609 bt_cb(skb)->req.event = event;
4610
Johan Hedberg71c76a12013-03-05 20:37:46 +02004611 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004612}
4613
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004614void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4615 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004616{
4617 hci_req_add_ev(req, opcode, plen, param, 0);
4618}
4619
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004621void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004622{
4623 struct hci_command_hdr *hdr;
4624
4625 if (!hdev->sent_cmd)
4626 return NULL;
4627
4628 hdr = (void *) hdev->sent_cmd->data;
4629
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004630 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004631 return NULL;
4632
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004633 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004634
4635 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4636}
4637
4638/* Send ACL data */
4639static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4640{
4641 struct hci_acl_hdr *hdr;
4642 int len = skb->len;
4643
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004644 skb_push(skb, HCI_ACL_HDR_SIZE);
4645 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004646 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004647 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4648 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004649}
4650
/* Add ACL headers to @skb (and each entry of its frag_list, if any)
 * and append the resulting packets to @queue in order.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict len to the linear part; fragments are handled below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle, AMP the logical channel
	 * handle, in the ACL header.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of
		 * ACL_START.
		 */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4712
4713void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4714{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004715 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004716
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004717 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004718
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004719 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004720
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004721 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004723
4724/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004725void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004726{
4727 struct hci_dev *hdev = conn->hdev;
4728 struct hci_sco_hdr hdr;
4729
4730 BT_DBG("%s len %d", hdev->name, skb->len);
4731
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004732 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 hdr.dlen = skb->len;
4734
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004735 skb_push(skb, HCI_SCO_HDR_SIZE);
4736 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004737 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004739 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004740
Linus Torvalds1da177e2005-04-16 15:20:36 -07004741 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004742 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744
4745/* ---- HCI TX task (outgoing data) ---- */
4746
4747/* HCI Connection scheduler */
/* HCI Connection scheduler: among connections of @type that are in a
 * data-carrying state and have queued packets, pick the one with the
 * fewest outstanding (sent) packets, and compute its fair share
 * (@quote) of the free controller buffers.
 *
 * Returns the chosen connection, or NULL (with *quote = 0) if nothing
 * is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins, to keep scheduling fair. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type; LE falls
		 * back to the ACL pool when no dedicated LE buffers exist.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split buffers evenly; everyone gets at least one. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4807
/* TX timeout handling: disconnect every connection of @type that still
 * has unacknowledged packets, since the link appears stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
4828
/* HCI channel scheduler: scan all connections of @type and pick the
 * channel whose head packet has the highest priority; among channels
 * tied at that priority, prefer the one whose connection has the
 * fewest outstanding packets. Also computes the channel's fair share
 * (@quote) of the free controller buffers.
 *
 * Returns the chosen channel or NULL if nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness search:
			 * only channels at the top priority compete.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the connection's sent counter. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool matching the link type; LE falls back to
	 * the ACL pool when no dedicated LE buffers exist.
	 */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split buffers evenly; everyone gets at least one. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4910
/* Anti-starvation pass after a scheduling round: channels that got to
 * transmit have their per-round sent counter reset; channels that were
 * starved get the priority of their head packet promoted towards
 * HCI_PRIO_MAX - 1 so they win a later round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel transmitted this round: just reset its
			 * counter, no promotion needed.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen; stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
4960
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004961static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4962{
4963 /* Calculate count of blocks used by this packet */
4964 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4965}
4966
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004967static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004969 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004970 /* ACL tx timeout must be longer than maximum
4971 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004972 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004973 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004974 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004976}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977
/* ACL scheduler for packet-based flow control: repeatedly pick the best
 * channel and drain up to its quota of packets while controller buffers
 * remain, then rebalance priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: give starved channels a chance next round. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
5015
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005016static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005017{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005018 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005019 struct hci_chan *chan;
5020 struct sk_buff *skb;
5021 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005022 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005023
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005024 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005025
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005026 BT_DBG("%s", hdev->name);
5027
5028 if (hdev->dev_type == HCI_AMP)
5029 type = AMP_LINK;
5030 else
5031 type = ACL_LINK;
5032
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005033 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005034 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005035 u32 priority = (skb_peek(&chan->data_q))->priority;
5036 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5037 int blocks;
5038
5039 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005040 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005041
5042 /* Stop if priority has changed */
5043 if (skb->priority < priority)
5044 break;
5045
5046 skb = skb_dequeue(&chan->data_q);
5047
5048 blocks = __get_blocks(hdev, skb);
5049 if (blocks > hdev->block_cnt)
5050 return;
5051
5052 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005053 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005054
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005055 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005056 hdev->acl_last_tx = jiffies;
5057
5058 hdev->block_cnt -= blocks;
5059 quote -= blocks;
5060
5061 chan->sent += blocks;
5062 chan->conn->sent += blocks;
5063 }
5064 }
5065
5066 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005067 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005068}
5069
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005070static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005071{
5072 BT_DBG("%s", hdev->name);
5073
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005074 /* No ACL link over BR/EDR controller */
5075 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5076 return;
5077
5078 /* No AMP link over AMP controller */
5079 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005080 return;
5081
5082 switch (hdev->flow_ctl_mode) {
5083 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5084 hci_sched_acl_pkt(hdev);
5085 break;
5086
5087 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5088 hci_sched_acl_blk(hdev);
5089 break;
5090 }
5091}
5092
Linus Torvalds1da177e2005-04-16 15:20:36 -07005093/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005094static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095{
5096 struct hci_conn *conn;
5097 struct sk_buff *skb;
5098 int quote;
5099
5100 BT_DBG("%s", hdev->name);
5101
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005102 if (!hci_conn_num(hdev, SCO_LINK))
5103 return;
5104
Linus Torvalds1da177e2005-04-16 15:20:36 -07005105 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5106 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5107 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005108 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005109
5110 conn->sent++;
5111 if (conn->sent == ~0)
5112 conn->sent = 0;
5113 }
5114 }
5115}
5116
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005117static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005118{
5119 struct hci_conn *conn;
5120 struct sk_buff *skb;
5121 int quote;
5122
5123 BT_DBG("%s", hdev->name);
5124
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005125 if (!hci_conn_num(hdev, ESCO_LINK))
5126 return;
5127
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005128 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5129 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005130 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5131 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005132 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005133
5134 conn->sent++;
5135 if (conn->sent == ~0)
5136 conn->sent = 0;
5137 }
5138 }
5139}
5140
/* LE scheduler: drain queued LE packets per-channel within quota,
 * accounting against the dedicated LE buffer pool or, if the
 * controller has none, the shared ACL pool.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE pool when present, otherwise share ACL buffers. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: give starved channels a chance next round. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5191
/* HCI TX worker: run the per-link-type schedulers (unless a user
 * channel owns the device) and then flush any raw packets.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* With a user channel active, scheduling is bypassed; only the
	 * raw queue below is serviced.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5212
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005213/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005214
5215/* ACL data packet */
/* ACL data packet: strip the ACL header, look up the connection for the
 * handle and hand the payload to L2CAP; drop packets for unknown
 * handles.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the 12-bit handle and 4 flag bits. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; L2CAP consumes the skb. */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
5250
5251/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005252static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005253{
5254 struct hci_sco_hdr *hdr = (void *) skb->data;
5255 struct hci_conn *conn;
5256 __u16 handle;
5257
5258 skb_pull(skb, HCI_SCO_HDR_SIZE);
5259
5260 handle = __le16_to_cpu(hdr->handle);
5261
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005262 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263
5264 hdev->stat.sco_rx++;
5265
5266 hci_dev_lock(hdev);
5267 conn = hci_conn_hash_lookup_handle(hdev, handle);
5268 hci_dev_unlock(hdev);
5269
5270 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005271 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005272 sco_recv_scodata(conn, skb);
5273 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005274 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005275 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005276 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 }
5278
5279 kfree_skb(skb);
5280}
5281
Johan Hedberg9238f362013-03-05 20:37:48 +02005282static bool hci_req_is_complete(struct hci_dev *hdev)
5283{
5284 struct sk_buff *skb;
5285
5286 skb = skb_peek(&hdev->cmd_q);
5287 if (!skb)
5288 return true;
5289
5290 return bt_cb(skb)->req.start;
5291}
5292
Johan Hedberg42c6b122013-03-05 20:37:49 +02005293static void hci_resend_last(struct hci_dev *hdev)
5294{
5295 struct hci_command_hdr *sent;
5296 struct sk_buff *skb;
5297 u16 opcode;
5298
5299 if (!hdev->sent_cmd)
5300 return;
5301
5302 sent = (void *) hdev->sent_cmd->data;
5303 opcode = __le16_to_cpu(sent->opcode);
5304 if (opcode == HCI_OP_RESET)
5305 return;
5306
5307 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5308 if (!skb)
5309 return;
5310
5311 skb_queue_head(&hdev->cmd_q, skb);
5312 queue_work(hdev->workqueue, &hdev->cmd_work);
5313}
5314
/* Handle completion of the command identified by @opcode with @status.
 * If the whole request has finished, run its completion callback (at
 * most once) and, on failure, flush the remaining queued commands that
 * belong to the same request.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * Dequeue until the next request boundary (req.start) and keep
	 * the last seen completion callback, protecting the queue with
	 * its own lock since other contexts may append to it.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* First command of the next request: put it back */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5380
/* RX work handler: drain hdev->rx_q and dispatch each packet to the
 * monitor, raw sockets and the matching protocol handler depending on
 * its packet type and the current device state.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* While in user channel mode the kernel stack does not
		 * process packets itself; drop them here.
		 */
		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
5435
/* Command work handler: send the next queued HCI command if the
 * controller's command credit count (cmd_cnt) allows it, keeping a
 * clone in hdev->sent_cmd for completion matching and retransmission.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command (free(NULL)-safe) */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* Don't arm the command timeout while a reset is
			 * in progress; otherwise (re)start it.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (OOM): put the command back and
			 * retry later.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005467
5468void hci_req_add_le_scan_disable(struct hci_request *req)
5469{
5470 struct hci_cp_le_set_scan_enable cp;
5471
5472 memset(&cp, 0, sizeof(cp));
5473 cp.enable = LE_SCAN_DISABLE;
5474 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5475}
Andre Guedesa4790db2014-02-26 20:21:47 -03005476
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005477static void add_to_white_list(struct hci_request *req,
5478 struct hci_conn_params *params)
5479{
5480 struct hci_cp_le_add_to_white_list cp;
5481
5482 cp.bdaddr_type = params->addr_type;
5483 bacpy(&cp.bdaddr, &params->addr);
5484
5485 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5486}
5487
5488static u8 update_white_list(struct hci_request *req)
5489{
5490 struct hci_dev *hdev = req->hdev;
5491 struct hci_conn_params *params;
5492 struct bdaddr_list *b;
5493 uint8_t white_list_entries = 0;
5494
5495 /* Go through the current white list programmed into the
5496 * controller one by one and check if that address is still
5497 * in the list of pending connections or list of devices to
5498 * report. If not present in either list, then queue the
5499 * command to remove it from the controller.
5500 */
5501 list_for_each_entry(b, &hdev->le_white_list, list) {
5502 struct hci_cp_le_del_from_white_list cp;
5503
5504 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5505 &b->bdaddr, b->bdaddr_type) ||
5506 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5507 &b->bdaddr, b->bdaddr_type)) {
5508 white_list_entries++;
5509 continue;
5510 }
5511
5512 cp.bdaddr_type = b->bdaddr_type;
5513 bacpy(&cp.bdaddr, &b->bdaddr);
5514
5515 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5516 sizeof(cp), &cp);
5517 }
5518
5519 /* Since all no longer valid white list entries have been
5520 * removed, walk through the list of pending connections
5521 * and ensure that any new device gets programmed into
5522 * the controller.
5523 *
5524 * If the list of the devices is larger than the list of
5525 * available white list entries in the controller, then
5526 * just abort and return filer policy value to not use the
5527 * white list.
5528 */
5529 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5530 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5531 &params->addr, params->addr_type))
5532 continue;
5533
5534 if (white_list_entries >= hdev->le_white_list_size) {
5535 /* Select filter policy to accept all advertising */
5536 return 0x00;
5537 }
5538
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005539 if (hci_find_irk_by_addr(hdev, &params->addr,
5540 params->addr_type)) {
5541 /* White list can not be used with RPAs */
5542 return 0x00;
5543 }
5544
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005545 white_list_entries++;
5546 add_to_white_list(req, params);
5547 }
5548
5549 /* After adding all new pending connections, walk through
5550 * the list of pending reports and also add these to the
5551 * white list if there is still space.
5552 */
5553 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5554 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5555 &params->addr, params->addr_type))
5556 continue;
5557
5558 if (white_list_entries >= hdev->le_white_list_size) {
5559 /* Select filter policy to accept all advertising */
5560 return 0x00;
5561 }
5562
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005563 if (hci_find_irk_by_addr(hdev, &params->addr,
5564 params->addr_type)) {
5565 /* White list can not be used with RPAs */
5566 return 0x00;
5567 }
5568
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005569 white_list_entries++;
5570 add_to_white_list(req, params);
5571 }
5572
5573 /* Select filter policy to use white list */
5574 return 0x01;
5575}
5576
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005577void hci_req_add_le_passive_scan(struct hci_request *req)
5578{
5579 struct hci_cp_le_set_scan_param param_cp;
5580 struct hci_cp_le_set_scan_enable enable_cp;
5581 struct hci_dev *hdev = req->hdev;
5582 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005583 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005584
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005585 /* Set require_privacy to false since no SCAN_REQ are send
5586 * during passive scanning. Not using an unresolvable address
5587 * here is important so that peer devices using direct
5588 * advertising with our address will be correctly reported
5589 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005590 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005591 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005592 return;
5593
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005594 /* Adding or removing entries from the white list must
5595 * happen before enabling scanning. The controller does
5596 * not allow white list modification while scanning.
5597 */
5598 filter_policy = update_white_list(req);
5599
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005600 memset(&param_cp, 0, sizeof(param_cp));
5601 param_cp.type = LE_SCAN_PASSIVE;
5602 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5603 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5604 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005605 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005606 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5607 &param_cp);
5608
5609 memset(&enable_cp, 0, sizeof(enable_cp));
5610 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005611 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005612 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5613 &enable_cp);
5614}
5615
Andre Guedesa4790db2014-02-26 20:21:47 -03005616static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5617{
5618 if (status)
5619 BT_DBG("HCI request failed to update background scanning: "
5620 "status 0x%2.2x", status);
5621}
5622
5623/* This function controls the background scanning based on hdev->pend_le_conns
5624 * list. If there are pending LE connection we start the background scanning,
5625 * otherwise we stop it.
5626 *
5627 * This function requires the caller holds hdev->lock.
5628 */
5629void hci_update_background_scan(struct hci_dev *hdev)
5630{
Andre Guedesa4790db2014-02-26 20:21:47 -03005631 struct hci_request req;
5632 struct hci_conn *conn;
5633 int err;
5634
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005635 if (!test_bit(HCI_UP, &hdev->flags) ||
5636 test_bit(HCI_INIT, &hdev->flags) ||
5637 test_bit(HCI_SETUP, &hdev->dev_flags) ||
Marcel Holtmannd603b762014-07-06 12:11:14 +02005638 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
Marcel Holtmannb8221772014-07-01 19:28:23 +02005639 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
Marcel Holtmannc20c02d2014-06-30 16:04:12 +02005640 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005641 return;
5642
Johan Hedberga70f4b52014-07-07 15:19:50 +03005643 /* No point in doing scanning if LE support hasn't been enabled */
5644 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5645 return;
5646
Johan Hedbergae23ada2014-07-07 13:24:59 +03005647 /* If discovery is active don't interfere with it */
5648 if (hdev->discovery.state != DISCOVERY_STOPPED)
5649 return;
5650
Andre Guedesa4790db2014-02-26 20:21:47 -03005651 hci_req_init(&req, hdev);
5652
Johan Hedbergd1d588c2014-07-20 17:10:45 +03005653 if (list_empty(&hdev->pend_le_conns) &&
Johan Hedberg66f84552014-07-04 12:37:18 +03005654 list_empty(&hdev->pend_le_reports)) {
Johan Hedberg0d2bf132014-07-02 22:42:02 +03005655 /* If there is no pending LE connections or devices
5656 * to be scanned for, we should stop the background
5657 * scanning.
Andre Guedesa4790db2014-02-26 20:21:47 -03005658 */
5659
5660 /* If controller is not scanning we are done. */
5661 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5662 return;
5663
5664 hci_req_add_le_scan_disable(&req);
5665
5666 BT_DBG("%s stopping background scanning", hdev->name);
5667 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005668 /* If there is at least one pending LE connection, we should
5669 * keep the background scan running.
5670 */
5671
Andre Guedesa4790db2014-02-26 20:21:47 -03005672 /* If controller is connecting, we should not start scanning
5673 * since some controllers are not able to scan and connect at
5674 * the same time.
5675 */
5676 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5677 if (conn)
5678 return;
5679
Andre Guedes4340a122014-03-10 18:26:24 -03005680 /* If controller is currently scanning, we stop it to ensure we
5681 * don't miss any advertising (due to duplicates filter).
5682 */
5683 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5684 hci_req_add_le_scan_disable(&req);
5685
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005686 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005687
5688 BT_DBG("%s starting background scanning", hdev->name);
5689 }
5690
5691 err = hci_req_run(&req, update_background_scan_complete);
5692 if (err)
5693 BT_ERR("Failed to run HCI request: err %d", err);
5694}
Johan Hedberg432df052014-08-01 11:13:31 +03005695
Johan Hedberg22f433d2014-08-01 11:13:32 +03005696static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5697{
5698 struct bdaddr_list *b;
5699
5700 list_for_each_entry(b, &hdev->whitelist, list) {
5701 struct hci_conn *conn;
5702
5703 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5704 if (!conn)
5705 return true;
5706
5707 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5708 return true;
5709 }
5710
5711 return false;
5712}
5713
Johan Hedberg432df052014-08-01 11:13:31 +03005714void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5715{
5716 u8 scan;
5717
5718 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5719 return;
5720
5721 if (!hdev_is_powered(hdev))
5722 return;
5723
5724 if (mgmt_powering_down(hdev))
5725 return;
5726
5727 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005728 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005729 scan = SCAN_PAGE;
5730 else
5731 scan = SCAN_DISABLED;
5732
5733 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5734 return;
5735
5736 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5737 scan |= SCAN_INQUIRY;
5738
5739 if (req)
5740 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5741 else
5742 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5743}