/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

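/* The dut_mode entry below follows the common debugfs boolean pattern:
 * the read handler reports the flag as 'Y'/'N', and the write handler
 * parses a boolean with strtobool() and flips the flag only after the
 * controller accepted the command. Enabling issues HCI_OP_ENABLE_DUT_MODE;
 * since there is no dedicated disable command, disabling issues
 * HCI_OP_RESET. Assuming debugfs is mounted at the usual location, this
 * is driven from userspace with something like:
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 */
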
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

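/* voice_setting above is exposed read-only: passing a NULL set callback
 * to DEFINE_SIMPLE_ATTRIBUTE() makes writes to the attribute fail. The
 * single-value attributes that follow pass both a get and a set callback
 * and are therefore read-write.
 */
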
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

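/* idle_timeout above is in milliseconds: 0 disables the idle timer
 * entirely, and any other value is confined by idle_timeout_set() to
 * the range 500 ms to 1 hour (3600000 ms).
 */
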
static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

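/* The sniff interval bounds above are expressed in baseband slots of
 * 0.625 ms. The setters enforce that each value is non-zero and even,
 * and that sniff_min_interval never exceeds sniff_max_interval.
 */
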
static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

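/* The key list dumps above (link_keys, identity_resolving_keys and
 * long_term_keys) walk their lists under rcu_read_lock() with
 * list_for_each_entry_rcu(): the lists are updated concurrently from
 * other contexts, and a read-side critical section is all that is
 * needed for printing them.
 */
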
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

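/* LE connection interval values are in units of 1.25 ms, so the range
 * 0x0006-0x0c80 enforced above corresponds to 7.5 ms - 4 s. The two
 * setters also cross-check each other so that the minimum can never
 * exceed the maximum.
 */
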
static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

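/* The LE link supervision timeout is in units of 10 ms, so the accepted
 * range 0x000a-0x0c80 above corresponds to 100 ms - 32 s.
 */
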
static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

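/* The advertising channel map is a three-bit mask selecting the LE
 * advertising channels: bit 0 is channel 37, bit 1 is channel 38 and
 * bit 2 is channel 39, so 0x07 enables all three.
 */
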
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

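/* LE advertising intervals are in units of 0.625 ms, so the range
 * 0x0020-0x4000 enforced above corresponds to 20 ms - 10.24 s, with
 * the usual min <= max cross-check between the two attributes.
 */
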
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

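/* A minimal sketch of how these synchronous helpers are used (compare
 * dut_mode_write() above; callers serialize against other requests by
 * taking hci_req_lock() first):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	status = skb->data[0];
 *	kfree_skb(skb);
 *
 * On success the returned skb carries the command complete parameters;
 * on failure an ERR_PTR() is returned instead.
 */
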
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001281{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001282 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +02001283
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001284 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001285 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001286
Marcel Holtmannf6996cf2013-10-07 02:31:39 -07001287 /* Read Local Supported Commands */
1288 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1289
1290 /* Read Local Supported Features */
1291 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1292
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +03001293 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001294 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +03001295
1296 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001297 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001298
Marcel Holtmannf38ba942013-10-07 03:55:53 -07001299 /* Read Flow Control Mode */
1300 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1301
Marcel Holtmann7528ca12013-10-07 03:55:52 -07001302 /* Read Location Data */
1303 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001304}
1305
Johan Hedberg42c6b122013-03-05 20:37:49 +02001306static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001307{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001308 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001309
1310 BT_DBG("%s %ld", hdev->name, opt);
1311
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001312 /* Reset */
1313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001314 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001315
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001316 switch (hdev->dev_type) {
1317 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001318 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001319 break;
1320
1321 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001322 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001323 break;
1324
1325 default:
1326 BT_ERR("Unknown device type %d", hdev->dev_type);
1327 break;
1328 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001329}
1330
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

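/* The connection accept timeout written above is measured in baseband
 * slots of 0.625 ms: 0x7d00 is 32000 slots, i.e. 32000 * 0.625 ms =
 * 20 seconds, which is where the "~20 secs" comment comes from.
 */
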
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

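/* Inquiry mode 0x00 is standard inquiry result, 0x01 is inquiry result
 * with RSSI and 0x02 is inquiry result with RSSI or extended inquiry
 * result. The manufacturer/revision checks above appear to whitelist
 * controllers known to support RSSI inquiry results without advertising
 * the corresponding LMP feature bits.
 */
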
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

Johan Hedberg42c6b122013-03-05 20:37:49 +02001573static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
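
/* Aside: the HCI_LP_* bits mirror the Write Default Link Policy
 * Settings command parameters (0x0001 role switch, 0x0002 hold mode,
 * 0x0004 sniff mode, 0x0008 park state), so a controller supporting
 * all four modes ends up sending cp.policy = cpu_to_le16(0x000f).
 */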
1591
Johan Hedberg42c6b122013-03-05 20:37:49 +02001592static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001593{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595 struct hci_cp_write_le_host_supported cp;
1596
Johan Hedbergc73eee92013-04-19 18:35:21 +03001597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
Johan Hedberg2177bab2013-03-05 20:37:43 +02001601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001605 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
1612
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618	/* If the Connectionless Slave Broadcast master role is
1619	 * supported, enable all necessary events for it.
1620	 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001621 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628	/* If the Connectionless Slave Broadcast slave role is
1629	 * supported, enable all necessary events for it.
1630	 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001631 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001638 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001639 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001640 events[2] |= 0x80;
1641
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
Johan Hedberg42c6b122013-03-05 20:37:49 +02001645static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001646{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001647 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001648 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001650 hci_setup_event_mask(req);
1651
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001652	/* Some Broadcom-based Bluetooth controllers do not support the
1653	 * Delete Stored Link Key command. They clearly indicate its
1654	 * absence in the bit mask of supported commands.
1655	 *
1656	 * Check the supported commands and send the command only if it
1657	 * is marked as supported. If it is not supported, assume that
1658	 * the controller does not have actual support for stored link
1659	 * keys, which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001660	 *
1661	 * Some controllers indicate that they support deleting stored
1662	 * link keys, but they don't. The quirk lets a driver simply
1663	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001664 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001665 if (hdev->commands[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001667 struct hci_cp_delete_stored_link_key cp;
1668
1669 bacpy(&cp.bdaddr, BDADDR_ANY);
1670 cp.delete_all = 0x01;
1671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1672 sizeof(cp), &cp);
1673 }
1674
Johan Hedberg2177bab2013-03-05 20:37:43 +02001675 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001676 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677
Andre Guedes9193c6e2014-07-01 18:10:09 -03001678 if (lmp_le_capable(hdev)) {
1679 u8 events[8];
1680
1681 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001682 events[0] = 0x0f;
1683
1684 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1685 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001686
1687		/* If the controller supports the Connection Parameters Request
1688		 * Link Layer Procedure, enable the corresponding event.
1689		 */
1690 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1691 events[0] |= 0x20; /* LE Remote Connection
1692 * Parameter Request
1693 */
1694
Andre Guedes9193c6e2014-07-01 18:10:09 -03001695 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1696 events);
1697
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001698 if (hdev->commands[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1701 }
1702
Johan Hedberg42c6b122013-03-05 20:37:49 +02001703 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001704 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001705
1706 /* Read features beyond page 1 if available */
1707 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1708 struct hci_cp_read_local_ext_features cp;
1709
1710 cp.page = p;
1711 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1712 sizeof(cp), &cp);
1713 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001714}
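
/* Aside: hdev->commands[] holds the Supported Commands bit mask from
 * Read Local Supported Commands; the array index selects the octet
 * and the tested constant the bit within it, as laid out in the Core
 * Specification. A sketch of a hypothetical helper (not an existing
 * kernel API) that makes checks such as hdev->commands[25] & 0x40
 * explicit:
 */
static inline bool hci_cmd_bit_sketch(struct hci_dev *hdev, int octet, int bit)
{
	/* e.g. octet 25, bit 6 is LE Read Advertising Channel TX Power */
	return hdev->commands[octet] & (1 << bit);
}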
1715
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001716static void hci_init4_req(struct hci_request *req, unsigned long opt)
1717{
1718 struct hci_dev *hdev = req->hdev;
1719
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001720 /* Set event mask page 2 if the HCI command for it is supported */
1721 if (hdev->commands[22] & 0x04)
1722 hci_set_event_mask_page_2(req);
1723
Marcel Holtmann109e3192014-07-23 19:24:56 +02001724 /* Read local codec list if the HCI command is supported */
1725 if (hdev->commands[29] & 0x20)
1726 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1727
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001728 /* Get MWS transport configuration if the HCI command is supported */
1729 if (hdev->commands[30] & 0x08)
1730 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1731
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001732 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001733 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001734 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001735
1736 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001737 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001738 u8 support = 0x01;
1739 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1740 sizeof(support), &support);
1741 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001742}
1743
Johan Hedberg2177bab2013-03-05 20:37:43 +02001744static int __hci_init(struct hci_dev *hdev)
1745{
1746 int err;
1747
1748 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1749 if (err < 0)
1750 return err;
1751
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001752 /* The Device Under Test (DUT) mode is special and available for
1753 * all controller types. So just create it early on.
1754 */
1755 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1756 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1757 &dut_mode_fops);
1758 }
1759
Johan Hedberg2177bab2013-03-05 20:37:43 +02001760	/* The HCI_BREDR device type covers single-mode LE, single-mode
1761	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1762	 * only need the first stage init.
1763	 */
1764 if (hdev->dev_type != HCI_BREDR)
1765 return 0;
1766
1767 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1770
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001771 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001775 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1776 if (err < 0)
1777 return err;
1778
1779 /* Only create debugfs entries during the initial setup
1780 * phase and not every time the controller gets powered on.
1781 */
1782 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1783 return 0;
1784
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001785 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1786 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001787 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1788 &hdev->manufacturer);
1789 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1790 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001791 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1792 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001793 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1794 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001795 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1796
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001797 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1798 &conn_info_min_age_fops);
1799 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1800 &conn_info_max_age_fops);
1801
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001802 if (lmp_bredr_capable(hdev)) {
1803 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1804 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001805 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1806 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001807 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1808 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001809 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1810 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001811 }
1812
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001813 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001814 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1815 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001816 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1817 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001818 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1819 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001820 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001821
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001822 if (lmp_sniff_capable(hdev)) {
1823 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1824 hdev, &idle_timeout_fops);
1825 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1826 hdev, &sniff_min_interval_fops);
1827 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1828 hdev, &sniff_max_interval_fops);
1829 }
1830
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001831 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001832 debugfs_create_file("identity", 0400, hdev->debugfs,
1833 hdev, &identity_fops);
1834 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1835 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001836 debugfs_create_file("random_address", 0444, hdev->debugfs,
1837 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001838 debugfs_create_file("static_address", 0444, hdev->debugfs,
1839 hdev, &static_address_fops);
1840
1841 /* For controllers with a public address, provide a debug
1842 * option to force the usage of the configured static
1843 * address. By default the public address is used.
1844 */
1845 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1846 debugfs_create_file("force_static_address", 0644,
1847 hdev->debugfs, hdev,
1848 &force_static_address_fops);
1849
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001850 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1851 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001852 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1853 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001854 debugfs_create_file("identity_resolving_keys", 0400,
1855 hdev->debugfs, hdev,
1856 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001857 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1858 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001859 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1860 hdev, &conn_min_interval_fops);
1861 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1862 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001863 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1864 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001865 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1866 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001867 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1868 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001869 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1870 hdev, &adv_min_interval_fops);
1871 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1872 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001873 debugfs_create_u16("discov_interleaved_timeout", 0644,
1874 hdev->debugfs,
1875 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001876
Johan Hedberg711eafe2014-08-08 09:32:52 +03001877 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001878 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001879
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001880 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001881}
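
/* Aside: taken together, __hci_init() runs up to four synchronous
 * request stages: init1 (reset and basic capability reads), init2
 * (BR/EDR, SSP and EIR setup), init3 (event masks, link policy and
 * LE setup) and init4 (second event mask page, codecs, MWS and
 * Secure Connections). AMP controllers stop after init1, and the
 * debugfs entries are created only once, during the HCI_SETUP phase.
 */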
1882
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001883static void hci_init0_req(struct hci_request *req, unsigned long opt)
1884{
1885 struct hci_dev *hdev = req->hdev;
1886
1887 BT_DBG("%s %ld", hdev->name, opt);
1888
1889 /* Reset */
1890 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1891 hci_reset_req(req, 0);
1892
1893 /* Read Local Version */
1894 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1895
1896 /* Read BD Address */
1897 if (hdev->set_bdaddr)
1898 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1899}
1900
1901static int __hci_unconf_init(struct hci_dev *hdev)
1902{
1903 int err;
1904
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001905 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1906 return 0;
1907
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001908 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1909 if (err < 0)
1910 return err;
1911
1912 return 0;
1913}
1914
Johan Hedberg42c6b122013-03-05 20:37:49 +02001915static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
1917 __u8 scan = opt;
1918
Johan Hedberg42c6b122013-03-05 20:37:49 +02001919 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920
1921 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001922 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923}
1924
Johan Hedberg42c6b122013-03-05 20:37:49 +02001925static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 __u8 auth = opt;
1928
Johan Hedberg42c6b122013-03-05 20:37:49 +02001929 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001932 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933}
1934
Johan Hedberg42c6b122013-03-05 20:37:49 +02001935static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936{
1937 __u8 encrypt = opt;
1938
Johan Hedberg42c6b122013-03-05 20:37:49 +02001939 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001941 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001942 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
Johan Hedberg42c6b122013-03-05 20:37:49 +02001945static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001946{
1947 __le16 policy = cpu_to_le16(opt);
1948
Johan Hedberg42c6b122013-03-05 20:37:49 +02001949 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001950
1951 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001953}
1954
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001955/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 * Device is held on return. */
1957struct hci_dev *hci_dev_get(int index)
1958{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001959 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 BT_DBG("%d", index);
1962
1963 if (index < 0)
1964 return NULL;
1965
1966 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001967 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 if (d->id == index) {
1969 hdev = hci_dev_hold(d);
1970 break;
1971 }
1972 }
1973 read_unlock(&hci_dev_list_lock);
1974 return hdev;
1975}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001978
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001979bool hci_discovery_active(struct hci_dev *hdev)
1980{
1981 struct discovery_state *discov = &hdev->discovery;
1982
Andre Guedes6fbe1952012-02-03 17:47:58 -03001983 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001984 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001985 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986 return true;
1987
Andre Guedes6fbe1952012-02-03 17:47:58 -03001988 default:
1989 return false;
1990 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001991}
1992
Johan Hedbergff9ef572012-01-04 14:23:45 +02001993void hci_discovery_set_state(struct hci_dev *hdev, int state)
1994{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001995 int old_state = hdev->discovery.state;
1996
Johan Hedbergff9ef572012-01-04 14:23:45 +02001997 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1998
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001999 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002000 return;
2001
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002002 hdev->discovery.state = state;
2003
Johan Hedbergff9ef572012-01-04 14:23:45 +02002004 switch (state) {
2005 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002006 hci_update_background_scan(hdev);
2007
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002008 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002009 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002010 break;
2011 case DISCOVERY_STARTING:
2012 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002013 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002014 mgmt_discovering(hdev, 1);
2015 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002016 case DISCOVERY_RESOLVING:
2017 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002018 case DISCOVERY_STOPPING:
2019 break;
2020 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002021}
2022
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002023void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024{
Johan Hedberg30883512012-01-04 14:16:21 +02002025 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002026 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Johan Hedberg561aafb2012-01-04 13:31:59 +02002028 list_for_each_entry_safe(p, n, &cache->all, all) {
2029 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002030 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002032
2033 INIT_LIST_HEAD(&cache->unknown);
2034 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035}
2036
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002037struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2038 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
Johan Hedberg30883512012-01-04 14:16:21 +02002040 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 struct inquiry_entry *e;
2042
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002043 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Johan Hedberg561aafb2012-01-04 13:31:59 +02002045 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002047 return e;
2048 }
2049
2050 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
2052
Johan Hedberg561aafb2012-01-04 13:31:59 +02002053struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002054 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055{
Johan Hedberg30883512012-01-04 14:16:21 +02002056 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057 struct inquiry_entry *e;
2058
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002059 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002060
2061 list_for_each_entry(e, &cache->unknown, list) {
2062 if (!bacmp(&e->data.bdaddr, bdaddr))
2063 return e;
2064 }
2065
2066 return NULL;
2067}
2068
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002069struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002070 bdaddr_t *bdaddr,
2071 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002072{
2073 struct discovery_state *cache = &hdev->discovery;
2074 struct inquiry_entry *e;
2075
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002076 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002077
2078 list_for_each_entry(e, &cache->resolve, list) {
2079 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2080 return e;
2081 if (!bacmp(&e->data.bdaddr, bdaddr))
2082 return e;
2083 }
2084
2085 return NULL;
2086}
2087
Johan Hedberga3d4e202012-01-09 00:53:02 +02002088void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002089 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002090{
2091 struct discovery_state *cache = &hdev->discovery;
2092 struct list_head *pos = &cache->resolve;
2093 struct inquiry_entry *p;
2094
2095 list_del(&ie->list);
2096
2097 list_for_each_entry(p, &cache->resolve, list) {
2098 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002099 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002100 break;
2101 pos = &p->list;
2102 }
2103
2104 list_add(&ie->list, pos);
2105}
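
/* Aside: the loop above keeps the resolve list sorted by ascending
 * |RSSI|, walking past entries whose resolution is already in flight
 * (NAME_PENDING). Since RSSI values are negative dBm, devices with
 * the strongest signal get their names resolved first.
 */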
2106
Marcel Holtmannaf589252014-07-01 14:11:20 +02002107u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2108 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
Johan Hedberg30883512012-01-04 14:16:21 +02002110 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002111 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002112 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002114 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Szymon Janc2b2fec42012-11-20 11:38:54 +01002116 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2117
Marcel Holtmannaf589252014-07-01 14:11:20 +02002118 if (!data->ssp_mode)
2119 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002120
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002121 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002122 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002123 if (!ie->data.ssp_mode)
2124 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002125
Johan Hedberga3d4e202012-01-09 00:53:02 +02002126 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002127 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002128 ie->data.rssi = data->rssi;
2129 hci_inquiry_cache_update_resolve(hdev, ie);
2130 }
2131
Johan Hedberg561aafb2012-01-04 13:31:59 +02002132 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002133 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002134
Johan Hedberg561aafb2012-01-04 13:31:59 +02002135 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002136 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002137 if (!ie) {
2138 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2139 goto done;
2140 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002141
2142 list_add(&ie->all, &cache->all);
2143
2144 if (name_known) {
2145 ie->name_state = NAME_KNOWN;
2146 } else {
2147 ie->name_state = NAME_NOT_KNOWN;
2148 list_add(&ie->list, &cache->unknown);
2149 }
2150
2151update:
2152 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002153 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002154 ie->name_state = NAME_KNOWN;
2155 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 }
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 memcpy(&ie->data, data, sizeof(*data));
2159 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002161
2162 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002163 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002164
Marcel Holtmannaf589252014-07-01 14:11:20 +02002165done:
2166 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167}
2168
2169static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2170{
Johan Hedberg30883512012-01-04 14:16:21 +02002171 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 struct inquiry_info *info = (struct inquiry_info *) buf;
2173 struct inquiry_entry *e;
2174 int copied = 0;
2175
Johan Hedberg561aafb2012-01-04 13:31:59 +02002176 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002178
2179 if (copied >= num)
2180 break;
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 bacpy(&info->bdaddr, &data->bdaddr);
2183 info->pscan_rep_mode = data->pscan_rep_mode;
2184 info->pscan_period_mode = data->pscan_period_mode;
2185 info->pscan_mode = data->pscan_mode;
2186 memcpy(info->dev_class, data->dev_class, 3);
2187 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002190 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 }
2192
2193 BT_DBG("cache %p, copied %d", cache, copied);
2194 return copied;
2195}
2196
Johan Hedberg42c6b122013-03-05 20:37:49 +02002197static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198{
2199 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002200 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 struct hci_cp_inquiry cp;
2202
2203 BT_DBG("%s", hdev->name);
2204
2205 if (test_bit(HCI_INQUIRY, &hdev->flags))
2206 return;
2207
2208 /* Start Inquiry */
2209 memcpy(&cp.lap, &ir->lap, 3);
2210 cp.length = ir->length;
2211 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002212 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
2214
2215int hci_inquiry(void __user *arg)
2216{
2217 __u8 __user *ptr = arg;
2218 struct hci_inquiry_req ir;
2219 struct hci_dev *hdev;
2220 int err = 0, do_inquiry = 0, max_rsp;
2221 long timeo;
2222 __u8 *buf;
2223
2224 if (copy_from_user(&ir, ptr, sizeof(ir)))
2225 return -EFAULT;
2226
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002227 hdev = hci_dev_get(ir.dev_id);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 err = -EBUSY;
2233 goto done;
2234 }
2235
Marcel Holtmann4a964402014-07-02 19:10:33 +02002236 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002237 err = -EOPNOTSUPP;
2238 goto done;
2239 }
2240
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002241 if (hdev->dev_type != HCI_BREDR) {
2242 err = -EOPNOTSUPP;
2243 goto done;
2244 }
2245
Johan Hedberg56f87902013-10-02 13:43:13 +03002246 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2247 err = -EOPNOTSUPP;
2248 goto done;
2249 }
2250
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002251 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002252 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002253 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002254 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 do_inquiry = 1;
2256 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002257 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Marcel Holtmann04837f62006-07-03 10:02:33 +02002259 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002260
2261 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002262 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2263 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264 if (err < 0)
2265 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002266
2267 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2268 * cleared). If it is interrupted by a signal, return -EINTR.
2269 */
NeilBrown74316202014-07-07 15:16:04 +10002270 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002271 TASK_INTERRUPTIBLE))
2272 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002275	/* For an unlimited number of responses, use a buffer with
2276	 * 255 entries.
2277	 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2279
2280	/* cache_dump can't sleep. Therefore we allocate a temporary
2281	 * buffer and then copy it to user space.
2282	 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002283 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002284 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 err = -ENOMEM;
2286 goto done;
2287 }
2288
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002289 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002291 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
2293 BT_DBG("num_rsp %d", ir.num_rsp);
2294
2295 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2296 ptr += sizeof(ir);
2297 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002298 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002300 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 err = -EFAULT;
2302
2303 kfree(buf);
2304
2305done:
2306 hci_dev_put(hdev);
2307 return err;
2308}
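
/* Aside: a minimal sketch of how userspace drives this ioctl over a
 * raw HCI socket, in the style of libbluetooth's hci_inquiry(). The
 * buffer layout, the request header followed by up to num_rsp
 * inquiry_info entries, matches the copy_to_user() calls above.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { };
 *
 *	buf.ir.dev_id  = 0;                     (hci0)
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;      (drop cached results)
 *	buf.ir.lap[0]  = 0x33;                  (GIAC 0x9e8b33,
 *	buf.ir.lap[1]  = 0x8b;                   little endian)
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;                     (8 * 1.28 seconds)
 *	buf.ir.num_rsp = 255;
 *
 *	sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	if (ioctl(sk, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */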
2309
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002310static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 int ret = 0;
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 BT_DBG("%s %p", hdev->name, hdev);
2315
2316 hci_req_lock(hdev);
2317
Johan Hovold94324962012-03-15 14:48:41 +01002318 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2319 ret = -ENODEV;
2320 goto done;
2321 }
2322
Marcel Holtmannd603b762014-07-06 12:11:14 +02002323 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2324 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002325 /* Check for rfkill but allow the HCI setup stage to
2326 * proceed (which in itself doesn't cause any RF activity).
2327 */
2328 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2329 ret = -ERFKILL;
2330 goto done;
2331 }
2332
2333 /* Check for valid public address or a configured static
2334		 * random address, but let the HCI setup proceed to
2335 * be able to determine if there is a public address
2336 * or not.
2337 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002338 * In case of user channel usage, it is not important
2339 * if a public address or static random address is
2340 * available.
2341 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002342 * This check is only valid for BR/EDR controllers
2343 * since AMP controllers do not have an address.
2344 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002345 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2346 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002347 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2348 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2349 ret = -EADDRNOTAVAIL;
2350 goto done;
2351 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002352 }
2353
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 if (test_bit(HCI_UP, &hdev->flags)) {
2355 ret = -EALREADY;
2356 goto done;
2357 }
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 if (hdev->open(hdev)) {
2360 ret = -EIO;
2361 goto done;
2362 }
2363
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002364 atomic_set(&hdev->cmd_cnt, 1);
2365 set_bit(HCI_INIT, &hdev->flags);
2366
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002367 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2368 if (hdev->setup)
2369 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002370
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002371 /* The transport driver can set these quirks before
2372 * creating the HCI device or in its setup callback.
2373 *
2374 * In case any of them is set, the controller has to
2375 * start up as unconfigured.
2376 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002377 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2378 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002379 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002380
2381 /* For an unconfigured controller it is required to
2382 * read at least the version information provided by
2383 * the Read Local Version Information command.
2384 *
2385 * If the set_bdaddr driver callback is provided, then
2386 * also the original Bluetooth public device address
2387 * will be read using the Read BD Address command.
2388 */
2389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2390 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002391 }
2392
Marcel Holtmann9713c172014-07-06 12:11:15 +02002393 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394 /* If public address change is configured, ensure that
2395 * the address gets programmed. If the driver does not
2396 * support changing the public address, fail the power
2397 * on procedure.
2398 */
2399 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2400 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002401 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2402 else
2403 ret = -EADDRNOTAVAIL;
2404 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002405
2406 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002407 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002408 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002409 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 }
2411
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002412 clear_bit(HCI_INIT, &hdev->flags);
2413
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 if (!ret) {
2415 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002416 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 set_bit(HCI_UP, &hdev->flags);
2418 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002419 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002420 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002421 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002422 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002423 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002424 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002425 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002427 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002428 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002430 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002431 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002432 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434 skb_queue_purge(&hdev->cmd_q);
2435 skb_queue_purge(&hdev->rx_q);
2436
2437 if (hdev->flush)
2438 hdev->flush(hdev);
2439
2440 if (hdev->sent_cmd) {
2441 kfree_skb(hdev->sent_cmd);
2442 hdev->sent_cmd = NULL;
2443 }
2444
2445 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002446 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
2448
2449done:
2450 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 return ret;
2452}
2453
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002454/* ---- HCI ioctl helpers ---- */
2455
2456int hci_dev_open(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int err;
2460
2461 hdev = hci_dev_get(dev);
2462 if (!hdev)
2463 return -ENODEV;
2464
Marcel Holtmann4a964402014-07-02 19:10:33 +02002465 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002466 * up as user channel. Trying to bring them up as normal devices
2467	 * will result in a failure. Only user channel operation is
2468 * possible.
2469 *
2470 * When this function is called for a user channel, the flag
2471 * HCI_USER_CHANNEL will be set first before attempting to
2472 * open the device.
2473 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002474 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002475 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2476 err = -EOPNOTSUPP;
2477 goto done;
2478 }
2479
Johan Hedberge1d08f42013-10-01 22:44:50 +03002480 /* We need to ensure that no other power on/off work is pending
2481 * before proceeding to call hci_dev_do_open. This is
2482 * particularly important if the setup procedure has not yet
2483 * completed.
2484 */
2485 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2486 cancel_delayed_work(&hdev->power_off);
2487
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002488 /* After this call it is guaranteed that the setup procedure
2489 * has finished. This means that error conditions like RFKILL
2490 * or no valid public or static random address apply.
2491 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002492 flush_workqueue(hdev->req_workqueue);
2493
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002494 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002495	 * are brought up using the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002496 * so that pairing works for them. Once the management interface
2497 * is in use this bit will be cleared again and userspace has
2498 * to explicitly enable it.
2499 */
2500 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2501 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002502 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002503
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002504 err = hci_dev_do_open(hdev);
2505
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002506done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002507 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002508 return err;
2509}
2510
Johan Hedbergd7347f32014-07-04 12:37:23 +03002511/* This function requires the caller holds hdev->lock */
2512static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2513{
2514 struct hci_conn_params *p;
2515
Johan Hedbergf161dd42014-08-15 21:06:54 +03002516 list_for_each_entry(p, &hdev->le_conn_params, list) {
2517 if (p->conn) {
2518 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002519 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002520 p->conn = NULL;
2521 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002522 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002523 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002524
2525 BT_DBG("All LE pending actions cleared");
2526}
2527
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528static int hci_dev_do_close(struct hci_dev *hdev)
2529{
2530 BT_DBG("%s %p", hdev->name, hdev);
2531
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002532 cancel_delayed_work(&hdev->power_off);
2533
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 hci_req_cancel(hdev, ENODEV);
2535 hci_req_lock(hdev);
2536
2537 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002538 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 hci_req_unlock(hdev);
2540 return 0;
2541 }
2542
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002543 /* Flush RX and TX works */
2544 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002545 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002547 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002548 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002549 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002550 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002551 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002552 }
2553
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002554 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002555 cancel_delayed_work(&hdev->service_cache);
2556
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002557 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002558
2559 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2560 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002561
Johan Hedberg76727c02014-11-18 09:00:14 +02002562 /* Avoid potential lockdep warnings from the *_flush() calls by
2563 * ensuring the workqueue is empty up front.
2564 */
2565 drain_workqueue(hdev->workqueue);
2566
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002567 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002568 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002569 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002570 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002571 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572
2573 hci_notify(hdev, HCI_DEV_DOWN);
2574
2575 if (hdev->flush)
2576 hdev->flush(hdev);
2577
2578 /* Reset device */
2579 skb_queue_purge(&hdev->cmd_q);
2580 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002581 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2582 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002583 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002585 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 clear_bit(HCI_INIT, &hdev->flags);
2587 }
2588
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002589 /* flush cmd work */
2590 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
2592 /* Drop queues */
2593 skb_queue_purge(&hdev->rx_q);
2594 skb_queue_purge(&hdev->cmd_q);
2595 skb_queue_purge(&hdev->raw_q);
2596
2597 /* Drop last sent command */
2598 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002599 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 kfree_skb(hdev->sent_cmd);
2601 hdev->sent_cmd = NULL;
2602 }
2603
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002604 kfree_skb(hdev->recv_evt);
2605 hdev->recv_evt = NULL;
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 /* After this point our queues are empty
2608 * and no tasks are scheduled. */
2609 hdev->close(hdev);
2610
Johan Hedberg35b973c2013-03-15 17:06:59 -05002611 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002612 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002613 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2614
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002615 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2616 if (hdev->dev_type == HCI_BREDR) {
2617 hci_dev_lock(hdev);
2618 mgmt_powered(hdev, 0);
2619 hci_dev_unlock(hdev);
2620 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002621 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002622
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002623 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002624 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002625
Johan Hedberge59fda82012-02-22 18:11:53 +02002626 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002627 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002628 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 hci_req_unlock(hdev);
2631
2632 hci_dev_put(hdev);
2633 return 0;
2634}
2635
2636int hci_dev_close(__u16 dev)
2637{
2638 struct hci_dev *hdev;
2639 int err;
2640
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002641 hdev = hci_dev_get(dev);
2642 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002644
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002645 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 err = -EBUSY;
2647 goto done;
2648 }
2649
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002650 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2651 cancel_delayed_work(&hdev->power_off);
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002654
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002655done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 hci_dev_put(hdev);
2657 return err;
2658}
2659
2660int hci_dev_reset(__u16 dev)
2661{
2662 struct hci_dev *hdev;
2663 int ret = 0;
2664
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002665 hdev = hci_dev_get(dev);
2666 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 return -ENODEV;
2668
2669 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
Marcel Holtmann808a0492013-08-26 20:57:58 -07002671 if (!test_bit(HCI_UP, &hdev->flags)) {
2672 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002676 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2677 ret = -EBUSY;
2678 goto done;
2679 }
2680
Marcel Holtmann4a964402014-07-02 19:10:33 +02002681 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002682 ret = -EOPNOTSUPP;
2683 goto done;
2684 }
2685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 /* Drop queues */
2687 skb_queue_purge(&hdev->rx_q);
2688 skb_queue_purge(&hdev->cmd_q);
2689
Johan Hedberg76727c02014-11-18 09:00:14 +02002690 /* Avoid potential lockdep warnings from the *_flush() calls by
2691 * ensuring the workqueue is empty up front.
2692 */
2693 drain_workqueue(hdev->workqueue);
2694
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002695 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002696 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002698 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 if (hdev->flush)
2701 hdev->flush(hdev);
2702
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002703 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002704 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002706 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707
2708done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 hci_req_unlock(hdev);
2710 hci_dev_put(hdev);
2711 return ret;
2712}
2713
2714int hci_dev_reset_stat(__u16 dev)
2715{
2716 struct hci_dev *hdev;
2717 int ret = 0;
2718
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002719 hdev = hci_dev_get(dev);
2720 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 return -ENODEV;
2722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724 ret = -EBUSY;
2725 goto done;
2726 }
2727
Marcel Holtmann4a964402014-07-02 19:10:33 +02002728 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002729 ret = -EOPNOTSUPP;
2730 goto done;
2731 }
2732
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2734
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002735done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 return ret;
2738}
2739
Johan Hedberg123abc02014-07-10 12:09:07 +03002740static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2741{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002742 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002743
2744 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2745
2746 if ((scan & SCAN_PAGE))
2747 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2748 &hdev->dev_flags);
2749 else
2750 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2751 &hdev->dev_flags);
2752
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002753 if ((scan & SCAN_INQUIRY)) {
2754 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2755 &hdev->dev_flags);
2756 } else {
2757 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2758 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2759 &hdev->dev_flags);
2760 }
2761
Johan Hedberg123abc02014-07-10 12:09:07 +03002762 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2763 return;
2764
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002765 if (conn_changed || discov_changed) {
2766 /* In case this was disabled through mgmt */
2767 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2768
2769 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2770 mgmt_update_adv_data(hdev);
2771
Johan Hedberg123abc02014-07-10 12:09:07 +03002772 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002773 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002774}
2775
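
For reference, the scan value handled above is the Write Scan Enable parameter from the Bluetooth core specification. A standalone sketch (not part of this file; the SCAN_* values below are the spec-defined constants) of how dev_opt decodes into the two flags:

#include <stdio.h>

#define SCAN_DISABLED	0x00
#define SCAN_INQUIRY	0x01	/* inquiry scan -> discoverable */
#define SCAN_PAGE	0x02	/* page scan -> connectable */

/* Mirrors the flag decisions in hci_update_scan_state() without
 * touching any hdev state. */
static void describe_scan(unsigned char scan)
{
	printf("connectable:  %s\n", (scan & SCAN_PAGE) ? "yes" : "no");
	printf("discoverable: %s\n", (scan & SCAN_INQUIRY) ? "yes" : "no");
}

int main(void)
{
	describe_scan(SCAN_PAGE);			/* connectable only */
	describe_scan(SCAN_PAGE | SCAN_INQUIRY);	/* both */
	return 0;
}
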
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776int hci_dev_cmd(unsigned int cmd, void __user *arg)
2777{
2778 struct hci_dev *hdev;
2779 struct hci_dev_req dr;
2780 int err = 0;
2781
2782 if (copy_from_user(&dr, arg, sizeof(dr)))
2783 return -EFAULT;
2784
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002785 hdev = hci_dev_get(dr.dev_id);
2786 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 return -ENODEV;
2788
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002789 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2790 err = -EBUSY;
2791 goto done;
2792 }
2793
Marcel Holtmann4a964402014-07-02 19:10:33 +02002794 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002795 err = -EOPNOTSUPP;
2796 goto done;
2797 }
2798
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002799 if (hdev->dev_type != HCI_BREDR) {
2800 err = -EOPNOTSUPP;
2801 goto done;
2802 }
2803
Johan Hedberg56f87902013-10-02 13:43:13 +03002804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2805 err = -EOPNOTSUPP;
2806 goto done;
2807 }
2808
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 switch (cmd) {
2810 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002811 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2812 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 break;
2814
2815 case HCISETENCRYPT:
2816 if (!lmp_encrypt_capable(hdev)) {
2817 err = -EOPNOTSUPP;
2818 break;
2819 }
2820
2821 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2822 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002823 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2824 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 if (err)
2826 break;
2827 }
2828
Johan Hedberg01178cd2013-03-05 20:37:41 +02002829 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2830 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 break;
2832
2833 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002834 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2835 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002836
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002837		/* Ensure that the connectable and discoverable states
2838		 * are updated correctly, since this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002839 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002840 if (!err)
2841 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 break;
2843
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002844 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002845 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2846 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002847 break;
2848
2849 case HCISETLINKMODE:
2850 hdev->link_mode = ((__u16) dr.dev_opt) &
2851 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2852 break;
2853
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 case HCISETPTYPE:
2855 hdev->pkt_type = (__u16) dr.dev_opt;
2856 break;
2857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002859 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2860 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 break;
2862
2863 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002864 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2865 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 break;
2867
2868 default:
2869 err = -EINVAL;
2870 break;
2871 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002872
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002873done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 hci_dev_put(hdev);
2875 return err;
2876}
2877
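
A minimal userspace caller for the HCISETSCAN branch above might look like the following sketch. It assumes the BlueZ <bluetooth/hci.h> userspace header and an existing hci0; it is an illustration, not code from this file.

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_req dr;
	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (sk < 0) {
		perror("socket");
		return 1;
	}

	dr.dev_id = 0;				/* hci0, assumed present */
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	/* connectable + discoverable */

	if (ioctl(sk, HCISETSCAN, (unsigned long) &dr) < 0)
		perror("HCISETSCAN");

	close(sk);
	return 0;
}
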
2878int hci_get_dev_list(void __user *arg)
2879{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002880 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 struct hci_dev_list_req *dl;
2882 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 int n = 0, size, err;
2884 __u16 dev_num;
2885
2886 if (get_user(dev_num, (__u16 __user *) arg))
2887 return -EFAULT;
2888
2889 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2890 return -EINVAL;
2891
2892 size = sizeof(*dl) + dev_num * sizeof(*dr);
2893
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002894 dl = kzalloc(size, GFP_KERNEL);
2895 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 return -ENOMEM;
2897
2898 dr = dl->dev_req;
2899
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002900 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002901 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002902 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002903
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002904		/* When auto-off is configured it means the transport
2905 * is running, but in that case still indicate that the
2906 * device is actually down.
2907 */
2908 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2909 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002910
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002912 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002913
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 if (++n >= dev_num)
2915 break;
2916 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002917 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
2919 dl->dev_num = n;
2920 size = sizeof(*dl) + n * sizeof(*dr);
2921
2922 err = copy_to_user(arg, dl, size);
2923 kfree(dl);
2924
2925 return err ? -EFAULT : 0;
2926}
2927
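
The matching userspace side of hci_get_dev_list() allocates the same flexible struct hci_dev_list_req layout that the kernel fills in. A hedged sketch, again assuming the BlueZ userspace headers:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	if (sk < 0)
		return 1;

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl) {
		close(sk);
		return 1;
	}

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(sk, HCIGETDEVLIST, (unsigned long) dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%08x\n", dr[i].dev_id,
			       (unsigned int) dr[i].dev_opt);

	free(dl);
	close(sk);
	return 0;
}
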
2928int hci_get_dev_info(void __user *arg)
2929{
2930 struct hci_dev *hdev;
2931 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002932 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 int err = 0;
2934
2935 if (copy_from_user(&di, arg, sizeof(di)))
2936 return -EFAULT;
2937
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002938 hdev = hci_dev_get(di.dev_id);
2939 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return -ENODEV;
2941
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002942	/* When auto-off is configured it means the transport
2943 * is running, but in that case still indicate that the
2944 * device is actually down.
2945 */
2946 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2947 flags = hdev->flags & ~BIT(HCI_UP);
2948 else
2949 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002950
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 strcpy(di.name, hdev->name);
2952 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002953 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002954 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002956 if (lmp_bredr_capable(hdev)) {
2957 di.acl_mtu = hdev->acl_mtu;
2958 di.acl_pkts = hdev->acl_pkts;
2959 di.sco_mtu = hdev->sco_mtu;
2960 di.sco_pkts = hdev->sco_pkts;
2961 } else {
2962 di.acl_mtu = hdev->le_mtu;
2963 di.acl_pkts = hdev->le_pkts;
2964 di.sco_mtu = 0;
2965 di.sco_pkts = 0;
2966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 di.link_policy = hdev->link_policy;
2968 di.link_mode = hdev->link_mode;
2969
2970 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2971 memcpy(&di.features, &hdev->features, sizeof(di.features));
2972
2973 if (copy_to_user(arg, &di, sizeof(di)))
2974 err = -EFAULT;
2975
2976 hci_dev_put(hdev);
2977
2978 return err;
2979}
2980
2981/* ---- Interface to HCI drivers ---- */
2982
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002983static int hci_rfkill_set_block(void *data, bool blocked)
2984{
2985 struct hci_dev *hdev = data;
2986
2987 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2988
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002989 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2990 return -EBUSY;
2991
Johan Hedberg5e130362013-09-13 08:58:17 +03002992 if (blocked) {
2993 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002994 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2995 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002996 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002997 } else {
2998 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002999 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003000
3001 return 0;
3002}
3003
3004static const struct rfkill_ops hci_rfkill_ops = {
3005 .set_block = hci_rfkill_set_block,
3006};
3007
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003008static void hci_power_on(struct work_struct *work)
3009{
3010 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003011 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003012
3013 BT_DBG("%s", hdev->name);
3014
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003015 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003016 if (err < 0) {
3017 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003018 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003019 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003020
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003021 /* During the HCI setup phase, a few error conditions are
3022 * ignored and they need to be checked now. If they are still
3023 * valid, it is important to turn the device back off.
3024 */
3025 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003026 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003027 (hdev->dev_type == HCI_BREDR &&
3028 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3029 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003030 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3031 hci_dev_do_close(hdev);
3032 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003033 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3034 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003035 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003036
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003037 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003038 /* For unconfigured devices, set the HCI_RAW flag
3039 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003040 */
3041 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3042 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003043
3044 /* For fully configured devices, this will send
3045 * the Index Added event. For unconfigured devices,
3046		 * it will send Unconfigured Index Added event.
3047 *
3048 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3049		 * and no event will be sent.
3050 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003051 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003052 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003053		/* Once the controller is configured, it
3054 * is important to clear the HCI_RAW flag.
3055 */
3056 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3057 clear_bit(HCI_RAW, &hdev->flags);
3058
Marcel Holtmannd603b762014-07-06 12:11:14 +02003059 /* Powering on the controller with HCI_CONFIG set only
3060 * happens with the transition from unconfigured to
3061 * configured. This will send the Index Added event.
3062 */
3063 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003064 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003065}
3066
3067static void hci_power_off(struct work_struct *work)
3068{
Johan Hedberg32435532011-11-07 22:16:04 +02003069 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003070 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003071
3072 BT_DBG("%s", hdev->name);
3073
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003074 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003075}
3076
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003077static void hci_discov_off(struct work_struct *work)
3078{
3079 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003080
3081 hdev = container_of(work, struct hci_dev, discov_off.work);
3082
3083 BT_DBG("%s", hdev->name);
3084
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003085 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003086}
3087
Johan Hedberg35f74982014-02-18 17:14:32 +02003088void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003089{
Johan Hedberg48210022013-01-27 00:31:28 +02003090 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003091
Johan Hedberg48210022013-01-27 00:31:28 +02003092 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3093 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003094 kfree(uuid);
3095 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003096}
3097
Johan Hedberg35f74982014-02-18 17:14:32 +02003098void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003099{
Johan Hedberg0378b592014-11-19 15:22:22 +02003100 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003101
Johan Hedberg0378b592014-11-19 15:22:22 +02003102 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3103 list_del_rcu(&key->list);
3104 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003105 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003106}
3107
Johan Hedberg35f74982014-02-18 17:14:32 +02003108void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003109{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003110 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003111
Johan Hedberg970d0f12014-11-13 14:37:47 +02003112 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3113 list_del_rcu(&k->list);
3114 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003115 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003116}
3117
Johan Hedberg970c4e42014-02-18 10:19:33 +02003118void hci_smp_irks_clear(struct hci_dev *hdev)
3119{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003120 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003121
Johan Hedbergadae20c2014-11-13 14:37:48 +02003122 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3123 list_del_rcu(&k->list);
3124 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003125 }
3126}
3127
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3129{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003130 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003131
Johan Hedberg0378b592014-11-19 15:22:22 +02003132 rcu_read_lock();
3133 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3134 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3135 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003136 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003137 }
3138 }
3139 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003140
3141 return NULL;
3142}
3143
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303144static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003145 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003146{
3147 /* Legacy key */
3148 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303149 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003150
3151 /* Debug keys are insecure so don't store them persistently */
3152 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303153 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003154
3155 /* Changed combination key and there's no previous one */
3156 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303157 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003158
3159 /* Security mode 3 case */
3160 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303161 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003162
Johan Hedberge3befab2014-06-01 16:33:39 +03003163 /* BR/EDR key derived using SC from an LE link */
3164 if (conn->type == LE_LINK)
3165 return true;
3166
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003167	/* Neither side requested no-bonding, i.e. both sides require bonding */
3168 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303169 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003170
3171 /* Local side had dedicated bonding as requirement */
3172 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003174
3175 /* Remote side had dedicated bonding as requirement */
3176 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303177 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003178
3179 /* If none of the above criteria match, then don't store the key
3180 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303181 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003182}
3183
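
As a reading aid (not kernel code), the decision chain in hci_persistent_key() condenses to:

	key_type < 0x03 (legacy key)              -> store
	HCI_LK_DEBUG_COMBINATION                  -> don't store
	HCI_LK_CHANGED_COMBINATION, no old key    -> don't store
	no conn (security mode 3)                 -> store
	BR/EDR key derived from an LE link (SC)   -> store
	both sides required bonding               -> store
	either side used dedicated bonding        -> store
	anything else                             -> don't store
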
Johan Hedberge804d252014-07-16 11:42:28 +03003184static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003185{
Johan Hedberge804d252014-07-16 11:42:28 +03003186 if (type == SMP_LTK)
3187 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003188
Johan Hedberge804d252014-07-16 11:42:28 +03003189 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003190}
3191
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003192struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3193 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003194{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003195 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003196
Johan Hedberg970d0f12014-11-13 14:37:47 +02003197 rcu_read_lock();
3198 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003199 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3200 continue;
3201
3202 if (smp_ltk_is_sc(k)) {
3203 if (k->type == SMP_LTK_P256_DEBUG &&
3204 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
3205 continue;
3206 rcu_read_unlock();
3207 return k;
3208 }
3209
3210 if (ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003211 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003212 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003213 }
3214 }
3215 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003216
3217 return NULL;
3218}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003219
Johan Hedberg970c4e42014-02-18 10:19:33 +02003220struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3221{
3222 struct smp_irk *irk;
3223
Johan Hedbergadae20c2014-11-13 14:37:48 +02003224 rcu_read_lock();
3225 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3226 if (!bacmp(&irk->rpa, rpa)) {
3227 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003228 return irk;
3229 }
3230 }
3231
Johan Hedbergadae20c2014-11-13 14:37:48 +02003232 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3233 if (smp_irk_matches(hdev, irk->val, rpa)) {
3234 bacpy(&irk->rpa, rpa);
3235 rcu_read_unlock();
3236 return irk;
3237 }
3238 }
3239 rcu_read_unlock();
3240
Johan Hedberg970c4e42014-02-18 10:19:33 +02003241 return NULL;
3242}
3243
3244struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3245 u8 addr_type)
3246{
3247 struct smp_irk *irk;
3248
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003249 /* Identity Address must be public or static random */
3250 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3251 return NULL;
3252
Johan Hedbergadae20c2014-11-13 14:37:48 +02003253 rcu_read_lock();
3254 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003255 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003256 bacmp(bdaddr, &irk->bdaddr) == 0) {
3257 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003258 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003259 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003260 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003261 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003262
3263 return NULL;
3264}
3265
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003266struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003267 bdaddr_t *bdaddr, u8 *val, u8 type,
3268 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003269{
3270 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303271 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003272
3273 old_key = hci_find_link_key(hdev, bdaddr);
3274 if (old_key) {
3275 old_key_type = old_key->type;
3276 key = old_key;
3277 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003278 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003279 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003280 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003281 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003282 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003283 }
3284
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003285 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003286
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003287 /* Some buggy controller combinations generate a changed
3288 * combination key for legacy pairing even when there's no
3289 * previous key */
3290 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003291 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003292 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003293 if (conn)
3294 conn->key_type = type;
3295 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003296
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003297 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003298 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003299 key->pin_len = pin_len;
3300
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003301 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003302 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003303 else
3304 key->type = type;
3305
Johan Hedberg7652ff62014-06-24 13:15:49 +03003306 if (persistent)
3307 *persistent = hci_persistent_key(hdev, conn, type,
3308 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003309
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003310 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003311}
3312
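
A hedged sketch of how a caller such as the link-key notification handler might consume the persistent flag; the wrapper function below is hypothetical, while mgmt_new_link_key() is the existing mgmt helper:

static void example_store_link_key(struct hci_dev *hdev,
				   struct hci_conn *conn, bdaddr_t *bdaddr,
				   u8 *val, u8 type, u8 pin_len)
{
	bool persistent;
	struct link_key *key;

	key = hci_add_link_key(hdev, conn, bdaddr, val, type, pin_len,
			       &persistent);
	if (!key)
		return;

	/* Userspace uses the persistent flag, derived via
	 * hci_persistent_key(), to decide whether the key should be
	 * stored permanently. */
	mgmt_new_link_key(hdev, key, persistent);
}
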
Johan Hedbergca9142b2014-02-19 14:57:44 +02003313struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003314 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003315 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003316{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003317 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003318 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003319
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003320 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003321 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003322 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003323 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003324 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003325 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003326 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003327 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003328 }
3329
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003330 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003331 key->bdaddr_type = addr_type;
3332 memcpy(key->val, tk, sizeof(key->val));
3333 key->authenticated = authenticated;
3334 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003335 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003336 key->enc_size = enc_size;
3337 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003338
Johan Hedbergca9142b2014-02-19 14:57:44 +02003339 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003340}
3341
Johan Hedbergca9142b2014-02-19 14:57:44 +02003342struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3343 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003344{
3345 struct smp_irk *irk;
3346
3347 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3348 if (!irk) {
3349 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3350 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003351 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003352
3353 bacpy(&irk->bdaddr, bdaddr);
3354 irk->addr_type = addr_type;
3355
Johan Hedbergadae20c2014-11-13 14:37:48 +02003356 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003357 }
3358
3359 memcpy(irk->val, val, 16);
3360 bacpy(&irk->rpa, rpa);
3361
Johan Hedbergca9142b2014-02-19 14:57:44 +02003362 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003363}
3364
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003365int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3366{
3367 struct link_key *key;
3368
3369 key = hci_find_link_key(hdev, bdaddr);
3370 if (!key)
3371 return -ENOENT;
3372
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003373 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003374
Johan Hedberg0378b592014-11-19 15:22:22 +02003375 list_del_rcu(&key->list);
3376 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003377
3378 return 0;
3379}
3380
Johan Hedberge0b2b272014-02-18 17:14:31 +02003381int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003382{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003383 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003384 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003385
Johan Hedberg970d0f12014-11-13 14:37:47 +02003386 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003387 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003388 continue;
3389
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003390 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003391
Johan Hedberg970d0f12014-11-13 14:37:47 +02003392 list_del_rcu(&k->list);
3393 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003394 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003395 }
3396
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003397 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003398}
3399
Johan Hedberga7ec7332014-02-18 17:14:35 +02003400void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3401{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003402 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003403
Johan Hedbergadae20c2014-11-13 14:37:48 +02003404 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003405 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3406 continue;
3407
3408 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3409
Johan Hedbergadae20c2014-11-13 14:37:48 +02003410 list_del_rcu(&k->list);
3411 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003412 }
3413}
3414
Ville Tervo6bd32322011-02-16 16:32:41 +02003415/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003416static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003417{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003418 struct hci_dev *hdev = container_of(work, struct hci_dev,
3419 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003420
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003421 if (hdev->sent_cmd) {
3422 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3423 u16 opcode = __le16_to_cpu(sent->opcode);
3424
3425 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3426 } else {
3427 BT_ERR("%s command tx timeout", hdev->name);
3428 }
3429
Ville Tervo6bd32322011-02-16 16:32:41 +02003430 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003431 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003432}
3433
Szymon Janc2763eda2011-03-22 13:12:22 +01003434struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003435 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003436{
3437 struct oob_data *data;
3438
3439 list_for_each_entry(data, &hdev->remote_oob_data, list)
3440 if (bacmp(bdaddr, &data->bdaddr) == 0)
3441 return data;
3442
3443 return NULL;
3444}
3445
3446int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3447{
3448 struct oob_data *data;
3449
3450 data = hci_find_remote_oob_data(hdev, bdaddr);
3451 if (!data)
3452 return -ENOENT;
3453
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003454 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003455
3456 list_del(&data->list);
3457 kfree(data);
3458
3459 return 0;
3460}
3461
Johan Hedberg35f74982014-02-18 17:14:32 +02003462void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003463{
3464 struct oob_data *data, *n;
3465
3466 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3467 list_del(&data->list);
3468 kfree(data);
3469 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003470}
3471
Marcel Holtmann07988722014-01-10 02:07:29 -08003472int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003473 u8 *hash, u8 *rand)
Szymon Janc2763eda2011-03-22 13:12:22 +01003474{
3475 struct oob_data *data;
3476
3477 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003478 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003479 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003480 if (!data)
3481 return -ENOMEM;
3482
3483 bacpy(&data->bdaddr, bdaddr);
3484 list_add(&data->list, &hdev->remote_oob_data);
3485 }
3486
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003487 memcpy(data->hash192, hash, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003488 memcpy(data->rand192, rand, sizeof(data->rand192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003489
Marcel Holtmann07988722014-01-10 02:07:29 -08003490 memset(data->hash256, 0, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003491 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003492
3493 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3494
3495 return 0;
3496}
3497
3498int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003499 u8 *hash192, u8 *rand192,
3500 u8 *hash256, u8 *rand256)
Marcel Holtmann07988722014-01-10 02:07:29 -08003501{
3502 struct oob_data *data;
3503
3504 data = hci_find_remote_oob_data(hdev, bdaddr);
3505 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003506 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003507 if (!data)
3508 return -ENOMEM;
3509
3510 bacpy(&data->bdaddr, bdaddr);
3511 list_add(&data->list, &hdev->remote_oob_data);
3512 }
3513
3514 memcpy(data->hash192, hash192, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003515 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003516
3517 memcpy(data->hash256, hash256, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003518 memcpy(data->rand256, rand256, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003519
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003520 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003521
3522 return 0;
3523}
3524
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003525struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003526 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003527{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003528 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003529
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003530 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003531 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003532 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003533 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003534
3535 return NULL;
3536}
3537
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003538void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003539{
3540 struct list_head *p, *n;
3541
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003542 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003543 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544
3545 list_del(p);
3546 kfree(b);
3547 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003548}
3549
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003550int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003551{
3552 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003553
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003554 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003555 return -EBADF;
3556
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003557 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003558 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003559
Johan Hedberg27f70f32014-07-21 10:50:06 +03003560 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003561 if (!entry)
3562 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003563
3564 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003565 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003566
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003567 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003568
3569 return 0;
3570}
3571
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003572int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003573{
3574 struct bdaddr_list *entry;
3575
Johan Hedberg35f74982014-02-18 17:14:32 +02003576 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003578 return 0;
3579 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003580
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003581 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003582 if (!entry)
3583 return -ENOENT;
3584
3585 list_del(&entry->list);
3586 kfree(entry);
3587
3588 return 0;
3589}
3590
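
A minimal in-kernel sketch (hypothetical caller) of the bdaddr list helpers above, using the whitelist that hci_alloc_dev() initializes further down; BDADDR_BREDR is the address type for BR/EDR entries:

static void example_whitelist(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	if (hci_bdaddr_list_add(&hdev->whitelist, bdaddr, BDADDR_BREDR) < 0)
		return;

	if (hci_bdaddr_list_lookup(&hdev->whitelist, bdaddr, BDADDR_BREDR))
		hci_bdaddr_list_del(&hdev->whitelist, bdaddr, BDADDR_BREDR);
}
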
Andre Guedes15819a72014-02-03 13:56:18 -03003591/* This function requires the caller holds hdev->lock */
3592struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3593 bdaddr_t *addr, u8 addr_type)
3594{
3595 struct hci_conn_params *params;
3596
Johan Hedberg738f6182014-07-03 19:33:51 +03003597 /* The conn params list only contains identity addresses */
3598 if (!hci_is_identity_address(addr, addr_type))
3599 return NULL;
3600
Andre Guedes15819a72014-02-03 13:56:18 -03003601 list_for_each_entry(params, &hdev->le_conn_params, list) {
3602 if (bacmp(&params->addr, addr) == 0 &&
3603 params->addr_type == addr_type) {
3604 return params;
3605 }
3606 }
3607
3608 return NULL;
3609}
3610
Andre Guedescef952c2014-02-26 20:21:49 -03003611static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3612{
3613 struct hci_conn *conn;
3614
3615 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3616 if (!conn)
3617 return false;
3618
3619 if (conn->dst_type != type)
3620 return false;
3621
3622 if (conn->state != BT_CONNECTED)
3623 return false;
3624
3625 return true;
3626}
3627
Andre Guedes15819a72014-02-03 13:56:18 -03003628/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003629struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3630 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003631{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003632 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003633
Johan Hedberg738f6182014-07-03 19:33:51 +03003634 /* The list only contains identity addresses */
3635 if (!hci_is_identity_address(addr, addr_type))
3636 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003637
Johan Hedberg501f8822014-07-04 12:37:26 +03003638 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003639 if (bacmp(&param->addr, addr) == 0 &&
3640 param->addr_type == addr_type)
3641 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003642 }
3643
3644 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003645}
3646
3647/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003648struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3649 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003650{
3651 struct hci_conn_params *params;
3652
Johan Hedbergc46245b2014-07-02 17:37:33 +03003653 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003654 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003655
Andre Guedes15819a72014-02-03 13:56:18 -03003656 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003657 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003658 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003659
3660 params = kzalloc(sizeof(*params), GFP_KERNEL);
3661 if (!params) {
3662 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003663 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003664 }
3665
3666 bacpy(&params->addr, addr);
3667 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003668
3669 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003670 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003671
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003672 params->conn_min_interval = hdev->le_conn_min_interval;
3673 params->conn_max_interval = hdev->le_conn_max_interval;
3674 params->conn_latency = hdev->le_conn_latency;
3675 params->supervision_timeout = hdev->le_supv_timeout;
3676 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3677
3678 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3679
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003680 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003681}
3682
3683/* This function requires the caller holds hdev->lock */
3684int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003685 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003686{
3687 struct hci_conn_params *params;
3688
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003689 params = hci_conn_params_add(hdev, addr, addr_type);
3690 if (!params)
3691 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003692
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003693 if (params->auto_connect == auto_connect)
3694 return 0;
3695
Johan Hedberg95305ba2014-07-04 12:37:21 +03003696 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003697
Andre Guedescef952c2014-02-26 20:21:49 -03003698 switch (auto_connect) {
3699 case HCI_AUTO_CONN_DISABLED:
3700 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003701 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003702 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003703 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003704 list_add(&params->action, &hdev->pend_le_reports);
3705 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003706 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003707 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003708 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003709 if (!is_connected(hdev, addr, addr_type)) {
3710 list_add(&params->action, &hdev->pend_le_conns);
3711 hci_update_background_scan(hdev);
3712 }
Andre Guedescef952c2014-02-26 20:21:49 -03003713 break;
3714 }
Andre Guedes15819a72014-02-03 13:56:18 -03003715
Johan Hedberg851efca2014-07-02 22:42:00 +03003716 params->auto_connect = auto_connect;
3717
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003718 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3719 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003720
3721 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003722}
3723
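
A hedged example of how mgmt code might use the helper above to put a bonded LE peer on the auto-connect list; the wrapper is hypothetical:

static void example_auto_connect(struct hci_dev *hdev, bdaddr_t *addr,
				 u8 addr_type)
{
	/* Like all conn_params helpers, this must run with hdev->lock
	 * held. */
	if (hci_conn_params_set(hdev, addr, addr_type,
				HCI_AUTO_CONN_ALWAYS) < 0)
		BT_ERR("Failed to enable auto-connect for %pMR", addr);
}
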
Johan Hedbergf6c63242014-08-15 21:06:59 +03003724static void hci_conn_params_free(struct hci_conn_params *params)
3725{
3726 if (params->conn) {
3727 hci_conn_drop(params->conn);
3728 hci_conn_put(params->conn);
3729 }
3730
3731 list_del(&params->action);
3732 list_del(&params->list);
3733 kfree(params);
3734}
3735
Andre Guedes15819a72014-02-03 13:56:18 -03003736/* This function requires the caller holds hdev->lock */
3737void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3738{
3739 struct hci_conn_params *params;
3740
3741 params = hci_conn_params_lookup(hdev, addr, addr_type);
3742 if (!params)
3743 return;
3744
Johan Hedbergf6c63242014-08-15 21:06:59 +03003745 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003746
Johan Hedberg95305ba2014-07-04 12:37:21 +03003747 hci_update_background_scan(hdev);
3748
Andre Guedes15819a72014-02-03 13:56:18 -03003749 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3750}
3751
3752/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003753void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003754{
3755 struct hci_conn_params *params, *tmp;
3756
3757 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003758 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3759 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003760 list_del(&params->list);
3761 kfree(params);
3762 }
3763
Johan Hedberg55af49a2014-07-02 17:37:26 +03003764	BT_DBG("All disabled LE connection parameters were removed");
3765}
3766
3767/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003768void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003769{
3770 struct hci_conn_params *params, *tmp;
3771
Johan Hedbergf6c63242014-08-15 21:06:59 +03003772 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3773 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003774
Johan Hedberga2f41a82014-07-04 12:37:19 +03003775 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003776
Andre Guedes15819a72014-02-03 13:56:18 -03003777 BT_DBG("All LE connection parameters were removed");
3778}
3779
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003780static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003781{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003782 if (status) {
3783 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003784
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003785 hci_dev_lock(hdev);
3786 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3787 hci_dev_unlock(hdev);
3788 return;
3789 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003790}
3791
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003792static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003793{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003794 /* General inquiry access code (GIAC) */
3795 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3796 struct hci_request req;
3797 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003798 int err;
3799
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003800 if (status) {
3801 BT_ERR("Failed to disable LE scanning: status %d", status);
3802 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003803 }
3804
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003805 switch (hdev->discovery.type) {
3806 case DISCOV_TYPE_LE:
3807 hci_dev_lock(hdev);
3808 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3809 hci_dev_unlock(hdev);
3810 break;
3811
3812 case DISCOV_TYPE_INTERLEAVED:
3813 hci_req_init(&req, hdev);
3814
3815 memset(&cp, 0, sizeof(cp));
3816 memcpy(&cp.lap, lap, sizeof(cp.lap));
3817 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3818 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3819
3820 hci_dev_lock(hdev);
3821
3822 hci_inquiry_cache_flush(hdev);
3823
3824 err = hci_req_run(&req, inquiry_complete);
3825 if (err) {
3826 BT_ERR("Inquiry request failed: err %d", err);
3827 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3828 }
3829
3830 hci_dev_unlock(hdev);
3831 break;
3832 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003833}
3834
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003835static void le_scan_disable_work(struct work_struct *work)
3836{
3837 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003838 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003839 struct hci_request req;
3840 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003841
3842 BT_DBG("%s", hdev->name);
3843
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003844 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003845
Andre Guedesb1efcc22014-02-26 20:21:40 -03003846 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003847
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003848 err = hci_req_run(&req, le_scan_disable_work_complete);
3849 if (err)
3850 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003851}
3852
Johan Hedberg8d972502014-02-28 12:54:14 +02003853static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3854{
3855 struct hci_dev *hdev = req->hdev;
3856
3857 /* If we're advertising or initiating an LE connection we can't
3858 * go ahead and change the random address at this time. This is
3859 * because the eventual initiator address used for the
3860 * subsequently created connection will be undefined (some
3861 * controllers use the new address and others the one we had
3862 * when the operation started).
3863 *
3864 * In this kind of scenario skip the update and let the random
3865 * address be updated at the next cycle.
3866 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003867 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003868 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3869 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003870 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003871 return;
3872 }
3873
3874 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3875}
3876
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003877int hci_update_random_address(struct hci_request *req, bool require_privacy,
3878 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003879{
3880 struct hci_dev *hdev = req->hdev;
3881 int err;
3882
3883 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003884	 * the current RPA has expired or there is something other than
3885 * the current RPA in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003886 */
3887 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003888 int to;
3889
3890 *own_addr_type = ADDR_LE_DEV_RANDOM;
3891
3892 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003893 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003894 return 0;
3895
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003896 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003897 if (err < 0) {
3898 BT_ERR("%s failed to generate new RPA", hdev->name);
3899 return err;
3900 }
3901
Johan Hedberg8d972502014-02-28 12:54:14 +02003902 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003903
3904 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3905 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3906
3907 return 0;
3908 }
3909
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003910 /* In case of required privacy without resolvable private address,
3911 * use an unresolvable private address. This is useful for active
3912 * scanning and non-connectable advertising.
3913 */
3914 if (require_privacy) {
3915 bdaddr_t urpa;
3916
3917 get_random_bytes(&urpa, 6);
3918 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3919
3920 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003921 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003922 return 0;
3923 }
3924
Johan Hedbergebd3a742014-02-23 19:42:21 +02003925 /* If forcing static address is in use or there is no public
3926	 * address, use the static address as the random address (but skip
3927	 * the HCI command if the current random address is already the
3928	 * static one).
3929 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003930 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003931 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3932 *own_addr_type = ADDR_LE_DEV_RANDOM;
3933 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3934 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3935 &hdev->static_addr);
3936 return 0;
3937 }
3938
3939 /* Neither privacy nor static address is being used so use a
3940 * public address.
3941 */
3942 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3943
3944 return 0;
3945}
3946
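
Worked example of the urpa masking above (byte values hypothetical): bdaddr_t stores the address little-endian, so b[5] carries the two most significant bits that encode the random-address sub-type (00 = non-resolvable private, 01 = resolvable private, 11 = static random):

	random bytes:   b[5] = 0xd7 = 1101 0111
	b[5] &= 0x3f:   b[5] = 0x17 = 0001 0111   -> top bits 00, non-resolvable
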
Johan Hedberga1f4c312014-02-27 14:05:41 +02003947/* Copy the Identity Address of the controller.
3948 *
3949 * If the controller has a public BD_ADDR, then by default use that one.
3950 * If this is an LE only controller without a public address, default to
3951 * the static random address.
3952 *
3953 * For debugging purposes it is possible to force controllers with a
3954 * public address to use the static random address instead.
3955 */
3956void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3957 u8 *bdaddr_type)
3958{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003959 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003960 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3961 bacpy(bdaddr, &hdev->static_addr);
3962 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3963 } else {
3964 bacpy(bdaddr, &hdev->bdaddr);
3965 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3966 }
3967}
3968
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
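
/* Illustrative sketch (added commentary, not part of the original file):
 * a transport driver typically pairs hci_alloc_dev() with
 * hci_register_dev(), providing the open/close/send callbacks that
 * registration requires. The callback names my_open, my_close and
 * my_send below are hypothetical:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */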

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Reset HCI device */
int hci_reset_dev(struct hci_dev *hdev)
{
	const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
	struct sk_buff *skb;

	skb = bt_skb_alloc(3, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, 3), hw_err, 3);

	/* Send Hardware Error to upper stack */
	return hci_recv_frame(hdev, skb);
}
EXPORT_SYMBOL(hci_reset_dev);
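
/* Note (added commentary): the three bytes injected by hci_reset_dev()
 * form a complete HCI event packet: event code HCI_EV_HARDWARE_ERROR, a
 * parameter length of 0x01 and a hardware code of 0x00. Routing it
 * through hci_recv_frame() makes the error look as if the controller
 * itself had reported it, so the normal event path can react to it.
 */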

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
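
/* Note (added commentary): hci_reassembly() consumes input bytes until a
 * frame completes or the data runs out. It returns the number of input
 * bytes left unconsumed (zero or more) or a negative errno; callers such
 * as hci_recv_stream_fragment() below loop on that remainder to carve
 * several frames out of a single buffer.
 */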

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
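
/* Illustrative sketch (added commentary, not part of the original file):
 * a UART or line-discipline driver without its own packet framing could
 * feed raw bytes into the stream reassembler like this, where buf and
 * len are hypothetical driver variables:
 *
 *	int err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("%s corrupted HCI stream", hdev->name);
 */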

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
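
/* Illustrative sketch (added commentary, not part of the original file):
 * a caller builds a request by queueing one or more commands and then
 * submitting them as a unit, e.g. with a hypothetical completion
 * callback my_complete:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 *
 * If any hci_req_add() call failed, req.err is set and hci_req_run()
 * purges the queued commands and returns that error instead.
 */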

bool hci_req_pending(struct hci_dev *hdev)
{
	return (hdev->req_status == HCI_REQ_PEND);
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	bt_cb(skb)->opcode = opcode;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
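
/* Illustrative sketch (added commentary, not part of the original file):
 * a single stand-alone command such as HCI Reset can be sent without
 * building a request:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */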

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because with 6LoWPAN links this function can be called
		 * from softirq context, and using a normal spin lock could
		 * cause deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
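
/* Note (added commentary): for a fragmented skb the head keeps the
 * caller-supplied flags (typically including ACL_START), while each
 * fragment taken from the frag_list is re-flagged with ACL_CONT in
 * hci_queue_acl(), allowing the controller to stitch the fragments back
 * into one higher-layer PDU.
 */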

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
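
/* Note (added commentary): the quote computed by hci_low_sent() is a
 * simple fair share. For example, with hdev->acl_cnt == 8 free ACL slots
 * and num == 2 ACL connections holding queued data, the selected
 * connection may send up to 8 / 2 == 4 packets in this pass; a zero
 * quota is rounded up to one so a ready connection is never starved.
 */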

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
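
/* Note (added commentary): with block-based flow control the controller
 * accounts for buffer usage in fixed-size blocks. Assuming a
 * hypothetical hdev->block_len of 16, a 100-byte ACL skb carries
 * 100 - HCI_ACL_HDR_SIZE == 96 payload bytes and thus occupies
 * DIV_ROUND_UP(96, 16) == 6 blocks.
 */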

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
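
/* Note (added commentary): raw packets bypass the schedulers entirely;
 * anything on hdev->raw_q is pushed straight to the driver, even when
 * the user channel is active and the ACL/SCO/LE scheduling above is
 * skipped.
 */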

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
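
/* Note (added commentary): a request's completion callback lives on its
 * final command only. On success it is taken from hdev->sent_cmd; on
 * failure the remaining queued commands of the same request are dropped
 * above and the callback is picked up from whichever of them carried it.
 */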

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
5425
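/* TX command work callback: sends the next queued command once the
 * controller has credit (cmd_cnt) and arms the command timeout timer.
 */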
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

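/* Queue an LE Set Scan Enable command that disables scanning. This only
 * adds the command to a request; a minimal usage sketch, following the
 * pattern in hci_update_background_scan() below (names are illustrative):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, my_complete_cb);
 */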
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

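/* Queue an LE Add Device To White List command for the given connection
 * parameters entry.
 */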
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

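/* Synchronize the controller white list with the pending connections
 * and pending reports lists. Returns the scan filter policy to use:
 * 0x01 if the white list can be used, or 0x00 (accept all advertising)
 * if the list overflows or an entry requires a resolvable private
 * address.
 */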
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all stale white list entries have now been removed,
	 * walk through the list of pending connections and ensure
	 * that any new device gets programmed into the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

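/* Queue the commands for an LE passive background scan: pick the own
 * address type, program the white list and set the scan parameters
 * before enabling scanning.
 */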
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

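/* Completion callback for the background scan request built below; it
 * only logs a failed status.
 */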
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we
 * start the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}

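/* Returns true if any device on the BR/EDR whitelist currently lacks an
 * established ACL connection, meaning page scan must stay enabled so
 * that such a device can reconnect.
 */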
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

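/* Enable or disable page scan (plus inquiry scan when discoverable)
 * based on the connectable setting and the whitelist state. The scan
 * enable command is queued on @req when one is given, otherwise it is
 * sent directly.
 */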
void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}