/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)         mutex_lock(&d->req_lock)
#define hci_req_unlock(d)       mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

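/* Boolean attributes such as dut_mode below share a common pattern:
 * reads return a three byte "Y\n" or "N\n" buffer, writes are parsed
 * with strtobool() and rejected with -EALREADY when the requested
 * state matches the current one.
 */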
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

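/* Read-only entries use the seq_file interface: single_open() binds a
 * *_show() callback that dumps the relevant state under hci_dev_lock()
 * (or under RCU for the key lists below), while seq_read(), seq_lseek()
 * and single_release() supply the file semantics.
 */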
static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct link_key *key;

        rcu_read_lock();
        list_for_each_entry_rcu(key, &hdev->link_keys, list)
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        rcu_read_unlock();

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

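/* The tunables below are plain numeric debugfs attributes built with
 * DEFINE_SIMPLE_ATTRIBUTE: a locked get/set pair plus a printf format.
 * Judging by the 500..3600000 range check, idle_timeout is expressed
 * in milliseconds (0.5 s to 1 h), with 0 meaning disabled.
 */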
static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        rcu_read_unlock();

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct smp_ltk *ltk;

        rcu_read_lock();
        list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        rcu_read_unlock();

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

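/* The LE connection parameters below use the controller's raw units
 * from the Core specification: connection intervals count in 1.25 ms
 * steps (0x0006..0x0c80, i.e. 7.5 ms to 4 s), slave latency counts in
 * connection events (at most 0x01f3) and the supervision timeout
 * counts in 10 ms steps (0x000a..0x0c80, i.e. 100 ms to 32 s).
 */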
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val > 0x01f3)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_latency = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_latency;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
                        conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x000a || val > 0x0c80)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_supv_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_supv_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
                        supervision_timeout_set, "%llu\n");

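/* The advertising channel map is a three bit field: bit 0 enables
 * channel 37, bit 1 channel 38 and bit 2 channel 39, hence the
 * allowed range 0x01..0x07.
 */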
static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

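/* Advertising intervals count in 0.625 ms steps; the allowed range
 * 0x0020..0x4000 corresponds to 20 ms up to 10.24 s.
 */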
static int adv_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
                        adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
                        adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct hci_conn_params *p;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->whitelist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
        .open           = device_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* ---- HCI requests ---- */

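/* Synchronous requests park the caller on req_wait_q with req_status
 * set to HCI_REQ_PEND. The completion handler below flips the status
 * to HCI_REQ_DONE (or HCI_REQ_CANCELED on abort), stores the outcome
 * in req_result and wakes the waiter.
 */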
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

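/* Send a single HCI command and wait for the matching Command Complete
 * event (or, when @event is non-zero, for that specific event). The
 * returned skb carries the command's return parameters and must be
 * freed with kfree_skb(). A minimal usage sketch, mirroring the
 * dut_mode debugfs write above:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 *
 * Callers such as dut_mode_write() take hci_req_lock() around the call
 * to serialize against other synchronous requests.
 */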
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

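/* Inquiry mode as written with HCI_OP_WRITE_INQUIRY_MODE: 0x00 is
 * standard inquiry result, 0x01 inquiry result with RSSI and 0x02
 * extended inquiry result. The manufacturer/revision checks below
 * appear to whitelist controllers that support RSSI reporting without
 * advertising the corresponding feature bit.
 */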
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

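/* The event mask is a 64-bit bitmap sent with HCI_OP_SET_EVENT_MASK;
 * each bit enables delivery of one HCI event (Core spec Vol 2, Part E,
 * Section 7.3.1). events[n] below is byte n of that bitmap.
 */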
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */

                if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
                        events[0] |= 0x80; /* Encryption Change */
                        events[5] |= 0x80; /* Encryption Key Refresh Complete */
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1510
Johan Hedberg42c6b122013-03-05 20:37:49 +02001511static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001512{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 struct hci_dev *hdev = req->hdev;
1514
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001517 else
1518 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001519
1520 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001521 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001522
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1525 */
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528
1529 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1535 */
1536 hdev->max_page = 0x01;
1537
Johan Hedberg2177bab2013-03-05 20:37:43 +02001538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001542 } else {
1543 struct hci_cp_write_eir cp;
1544
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549 }
1550 }
1551
1552 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001553 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001554
1555 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001557
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
1560
1561 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001564 }
1565
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570 }
1571}
1572
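/* Build the default link policy from the role switch, hold, sniff and
 * park features reported by the controller and program it using the
 * Write Default Link Policy Settings command.
 */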
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
1591
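/* Mirror the HCI_LE_ENABLED flag into the controller's LE Host Support
 * setting. LE-only controllers are skipped since they do not support
 * explicit enablement (see the check below).
 */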
Johan Hedberg42c6b122013-03-05 20:37:49 +02001592static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001593{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595 struct hci_cp_write_le_host_supported cp;
1596
Johan Hedbergc73eee92013-04-19 18:35:21 +03001597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
Johan Hedberg2177bab2013-03-05 20:37:43 +02001601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001605 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
1612
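/* Page 2 of the event mask covers the Connectionless Slave Broadcast
 * events and the Authenticated Payload Timeout Expired event; enable
 * only the ones the controller can actually generate.
 */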
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618	/* If Connectionless Slave Broadcast master role is supported,
1619 * enable all necessary events for it.
1620 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001621 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628	/* If Connectionless Slave Broadcast slave role is supported,
1629 * enable all necessary events for it.
1630 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001631 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001638 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001639 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001640 events[2] |= 0x80;
1641
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
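/* Third stage of the init sequence. The supported commands bitmask has
 * been read by now, so hdev->commands[] can be consulted before issuing
 * optional commands.
 */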
Johan Hedberg42c6b122013-03-05 20:37:49 +02001645static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001646{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001647 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001648 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001650 hci_setup_event_mask(req);
1651
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001652 /* Some Broadcom based Bluetooth controllers do not support the
1653 * Delete Stored Link Key command. They are clearly indicating its
1654 * absence in the bit mask of supported commands.
1655 *
1656	 * Check the supported commands and send the command only if it
1657	 * is marked as supported. If it is not supported, assume that
1658	 * the controller has no actual support for stored link keys,
1659	 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001660 *
1661	 * Some controllers indicate that they support deleting stored
1662	 * link keys, but they actually don't. The quirk lets a driver
1663 * just disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001664 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001665 if (hdev->commands[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001667 struct hci_cp_delete_stored_link_key cp;
1668
1669 bacpy(&cp.bdaddr, BDADDR_ANY);
1670 cp.delete_all = 0x01;
1671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1672 sizeof(cp), &cp);
1673 }
1674
Johan Hedberg2177bab2013-03-05 20:37:43 +02001675 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001676 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677
Andre Guedes9193c6e2014-07-01 18:10:09 -03001678 if (lmp_le_capable(hdev)) {
1679 u8 events[8];
1680
1681 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001682 events[0] = 0x0f;
1683
1684 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1685 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001686
1687 /* If controller supports the Connection Parameters Request
1688 * Link Layer Procedure, enable the corresponding event.
1689 */
1690 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1691 events[0] |= 0x20; /* LE Remote Connection
1692 * Parameter Request
1693 */
1694
Andre Guedes9193c6e2014-07-01 18:10:09 -03001695 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1696 events);
1697
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001698 if (hdev->commands[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1701 }
1702
Johan Hedberg42c6b122013-03-05 20:37:49 +02001703 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001704 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001705
1706 /* Read features beyond page 1 if available */
1707 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1708 struct hci_cp_read_local_ext_features cp;
1709
1710 cp.page = p;
1711 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1712 sizeof(cp), &cp);
1713 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001714}
1715
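/* Fourth and final stage of the init sequence, covering purely optional
 * features: event mask page 2, the local codec list, MWS transport
 * configuration, Synchronization Train parameters and Secure
 * Connections support.
 */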
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001716static void hci_init4_req(struct hci_request *req, unsigned long opt)
1717{
1718 struct hci_dev *hdev = req->hdev;
1719
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001720 /* Set event mask page 2 if the HCI command for it is supported */
1721 if (hdev->commands[22] & 0x04)
1722 hci_set_event_mask_page_2(req);
1723
Marcel Holtmann109e3192014-07-23 19:24:56 +02001724 /* Read local codec list if the HCI command is supported */
1725 if (hdev->commands[29] & 0x20)
1726 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1727
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001728 /* Get MWS transport configuration if the HCI command is supported */
1729 if (hdev->commands[30] & 0x08)
1730 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1731
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001732 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001733 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001734 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001735
1736 /* Enable Secure Connections if supported and configured */
Johan Hedberg710f11c2014-05-26 11:21:22 +03001737 if (bredr_sc_enabled(hdev)) {
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001738 u8 support = 0x01;
1739 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1740 sizeof(support), &support);
1741 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001742}
1743
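/* Run the full four-stage init sequence. AMP controllers stop after the
 * first stage; for everything else the debugfs entries matching the
 * controller capabilities are created during the initial setup phase
 * only.
 */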
Johan Hedberg2177bab2013-03-05 20:37:43 +02001744static int __hci_init(struct hci_dev *hdev)
1745{
1746 int err;
1747
1748 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1749 if (err < 0)
1750 return err;
1751
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001752 /* The Device Under Test (DUT) mode is special and available for
1753 * all controller types. So just create it early on.
1754 */
1755 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1756 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1757 &dut_mode_fops);
1758 }
1759
Johan Hedberg2177bab2013-03-05 20:37:43 +02001760	/* The HCI_BREDR device type covers single-mode LE, single-mode
1761	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
1762	 * only need the first stage init.
1763 */
1764 if (hdev->dev_type != HCI_BREDR)
1765 return 0;
1766
1767 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1770
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001771 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1772 if (err < 0)
1773 return err;
1774
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001775 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1776 if (err < 0)
1777 return err;
1778
1779 /* Only create debugfs entries during the initial setup
1780 * phase and not every time the controller gets powered on.
1781 */
1782 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1783 return 0;
1784
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001785 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1786 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001787 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1788 &hdev->manufacturer);
1789 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1790 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001791 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1792 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001793 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1794 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001795 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1796
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001797 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1798 &conn_info_min_age_fops);
1799 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1800 &conn_info_max_age_fops);
1801
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001802 if (lmp_bredr_capable(hdev)) {
1803 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1804 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001805 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1806 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001807 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1808 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001809 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1810 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001811 }
1812
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001813 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001814 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1815 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001816 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1817 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001818 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1819 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001820 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001821
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001822 if (lmp_sniff_capable(hdev)) {
1823 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1824 hdev, &idle_timeout_fops);
1825 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1826 hdev, &sniff_min_interval_fops);
1827 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1828 hdev, &sniff_max_interval_fops);
1829 }
1830
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001831 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001832 debugfs_create_file("identity", 0400, hdev->debugfs,
1833 hdev, &identity_fops);
1834 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1835 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001836 debugfs_create_file("random_address", 0444, hdev->debugfs,
1837 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001838 debugfs_create_file("static_address", 0444, hdev->debugfs,
1839 hdev, &static_address_fops);
1840
1841 /* For controllers with a public address, provide a debug
1842 * option to force the usage of the configured static
1843 * address. By default the public address is used.
1844 */
1845 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1846 debugfs_create_file("force_static_address", 0644,
1847 hdev->debugfs, hdev,
1848 &force_static_address_fops);
1849
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001850 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1851 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001852 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1853 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001854 debugfs_create_file("identity_resolving_keys", 0400,
1855 hdev->debugfs, hdev,
1856 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001857 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1858 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001859 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1860 hdev, &conn_min_interval_fops);
1861 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1862 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001863 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1864 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001865 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1866 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001867 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1868 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001869 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1870 hdev, &adv_min_interval_fops);
1871 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1872 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001873 debugfs_create_u16("discov_interleaved_timeout", 0644,
1874 hdev->debugfs,
1875 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001876
Johan Hedberg711eafe2014-08-08 09:32:52 +03001877 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001878 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001879
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001880 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001881}
1882
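/* Minimal init used for unconfigured controllers: reset (unless the
 * quirk says otherwise), read the local version information and, if the
 * driver can change it, the public Bluetooth device address.
 */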
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001883static void hci_init0_req(struct hci_request *req, unsigned long opt)
1884{
1885 struct hci_dev *hdev = req->hdev;
1886
1887 BT_DBG("%s %ld", hdev->name, opt);
1888
1889 /* Reset */
1890 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1891 hci_reset_req(req, 0);
1892
1893 /* Read Local Version */
1894 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1895
1896 /* Read BD Address */
1897 if (hdev->set_bdaddr)
1898 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1899}
1900
1901static int __hci_unconf_init(struct hci_dev *hdev)
1902{
1903 int err;
1904
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001905 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1906 return 0;
1907
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001908 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1909 if (err < 0)
1910 return err;
1911
1912 return 0;
1913}
1914
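/* Request helpers backing the legacy HCISET* ioctls below. Each helper
 * queues exactly one HCI command; hci_req_sync() runs them roughly as
 * follows (illustrative sketch, locking and error handling omitted):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	req_func(&req, opt);
 *	hci_req_run(&req, hci_req_sync_complete);
 */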
Johan Hedberg42c6b122013-03-05 20:37:49 +02001915static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916{
1917 __u8 scan = opt;
1918
Johan Hedberg42c6b122013-03-05 20:37:49 +02001919 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920
1921 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001922 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923}
1924
Johan Hedberg42c6b122013-03-05 20:37:49 +02001925static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926{
1927 __u8 auth = opt;
1928
Johan Hedberg42c6b122013-03-05 20:37:49 +02001929 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001932 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933}
1934
Johan Hedberg42c6b122013-03-05 20:37:49 +02001935static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936{
1937 __u8 encrypt = opt;
1938
Johan Hedberg42c6b122013-03-05 20:37:49 +02001939 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001941 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001942 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943}
1944
Johan Hedberg42c6b122013-03-05 20:37:49 +02001945static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001946{
1947 __le16 policy = cpu_to_le16(opt);
1948
Johan Hedberg42c6b122013-03-05 20:37:49 +02001949 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001950
1951 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001953}
1954
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001955/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001956 * Device is held on return.
 */
1957struct hci_dev *hci_dev_get(int index)
1958{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001959 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
1961 BT_DBG("%d", index);
1962
1963 if (index < 0)
1964 return NULL;
1965
1966 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001967 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 if (d->id == index) {
1969 hdev = hci_dev_hold(d);
1970 break;
1971 }
1972 }
1973 read_unlock(&hci_dev_list_lock);
1974 return hdev;
1975}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976
1977/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001978
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001979bool hci_discovery_active(struct hci_dev *hdev)
1980{
1981 struct discovery_state *discov = &hdev->discovery;
1982
Andre Guedes6fbe1952012-02-03 17:47:58 -03001983 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001984 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001985 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001986 return true;
1987
Andre Guedes6fbe1952012-02-03 17:47:58 -03001988 default:
1989 return false;
1990 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001991}
1992
Johan Hedbergff9ef572012-01-04 14:23:45 +02001993void hci_discovery_set_state(struct hci_dev *hdev, int state)
1994{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001995 int old_state = hdev->discovery.state;
1996
Johan Hedbergff9ef572012-01-04 14:23:45 +02001997 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1998
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001999 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002000 return;
2001
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002002 hdev->discovery.state = state;
2003
Johan Hedbergff9ef572012-01-04 14:23:45 +02002004 switch (state) {
2005 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002006 hci_update_background_scan(hdev);
2007
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002008 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002009 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002010 break;
2011 case DISCOVERY_STARTING:
2012 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002013 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002014 mgmt_discovering(hdev, 1);
2015 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002016 case DISCOVERY_RESOLVING:
2017 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002018 case DISCOVERY_STOPPING:
2019 break;
2020 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002021}
2022
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002023void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024{
Johan Hedberg30883512012-01-04 14:16:21 +02002025 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002026 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027
Johan Hedberg561aafb2012-01-04 13:31:59 +02002028 list_for_each_entry_safe(p, n, &cache->all, all) {
2029 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002030 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002032
2033 INIT_LIST_HEAD(&cache->unknown);
2034 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035}
2036
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002037struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2038 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
Johan Hedberg30883512012-01-04 14:16:21 +02002040 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 struct inquiry_entry *e;
2042
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002043 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
Johan Hedberg561aafb2012-01-04 13:31:59 +02002045 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002047 return e;
2048 }
2049
2050 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051}
2052
Johan Hedberg561aafb2012-01-04 13:31:59 +02002053struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002054 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055{
Johan Hedberg30883512012-01-04 14:16:21 +02002056 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057 struct inquiry_entry *e;
2058
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002059 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002060
2061 list_for_each_entry(e, &cache->unknown, list) {
2062 if (!bacmp(&e->data.bdaddr, bdaddr))
2063 return e;
2064 }
2065
2066 return NULL;
2067}
2068
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002069struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002070 bdaddr_t *bdaddr,
2071 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002072{
2073 struct discovery_state *cache = &hdev->discovery;
2074 struct inquiry_entry *e;
2075
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002076 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002077
2078 list_for_each_entry(e, &cache->resolve, list) {
2079 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2080 return e;
2081 if (!bacmp(&e->data.bdaddr, bdaddr))
2082 return e;
2083 }
2084
2085 return NULL;
2086}
2087
Johan Hedberga3d4e202012-01-09 00:53:02 +02002088void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002089 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002090{
2091 struct discovery_state *cache = &hdev->discovery;
2092 struct list_head *pos = &cache->resolve;
2093 struct inquiry_entry *p;
2094
2095 list_del(&ie->list);
2096
2097 list_for_each_entry(p, &cache->resolve, list) {
2098 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002099 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002100 break;
2101 pos = &p->list;
2102 }
2103
2104 list_add(&ie->list, pos);
2105}
2106
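/* Add a freshly discovered device to the inquiry cache, or refresh its
 * entry if it is already known. The returned MGMT_DEV_FOUND_* flags
 * tell the caller whether the name still needs to be confirmed and
 * whether the remote device only supports legacy pairing.
 */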
Marcel Holtmannaf589252014-07-01 14:11:20 +02002107u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2108 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109{
Johan Hedberg30883512012-01-04 14:16:21 +02002110 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002111 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002112 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002114 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Szymon Janc2b2fec42012-11-20 11:38:54 +01002116 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2117
Marcel Holtmannaf589252014-07-01 14:11:20 +02002118 if (!data->ssp_mode)
2119 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002120
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002121 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002122 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002123 if (!ie->data.ssp_mode)
2124 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002125
Johan Hedberga3d4e202012-01-09 00:53:02 +02002126 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002127 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002128 ie->data.rssi = data->rssi;
2129 hci_inquiry_cache_update_resolve(hdev, ie);
2130 }
2131
Johan Hedberg561aafb2012-01-04 13:31:59 +02002132 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002133 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002134
Johan Hedberg561aafb2012-01-04 13:31:59 +02002135 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002136 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002137 if (!ie) {
2138 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2139 goto done;
2140 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002141
2142 list_add(&ie->all, &cache->all);
2143
2144 if (name_known) {
2145 ie->name_state = NAME_KNOWN;
2146 } else {
2147 ie->name_state = NAME_NOT_KNOWN;
2148 list_add(&ie->list, &cache->unknown);
2149 }
2150
2151update:
2152 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002153 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002154 ie->name_state = NAME_KNOWN;
2155 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002156 }
2157
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002158 memcpy(&ie->data, data, sizeof(*data));
2159 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002161
2162 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002163 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002164
Marcel Holtmannaf589252014-07-01 14:11:20 +02002165done:
2166 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167}
2168
2169static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2170{
Johan Hedberg30883512012-01-04 14:16:21 +02002171 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 struct inquiry_info *info = (struct inquiry_info *) buf;
2173 struct inquiry_entry *e;
2174 int copied = 0;
2175
Johan Hedberg561aafb2012-01-04 13:31:59 +02002176 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002178
2179 if (copied >= num)
2180 break;
2181
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 bacpy(&info->bdaddr, &data->bdaddr);
2183 info->pscan_rep_mode = data->pscan_rep_mode;
2184 info->pscan_period_mode = data->pscan_period_mode;
2185 info->pscan_mode = data->pscan_mode;
2186 memcpy(info->dev_class, data->dev_class, 3);
2187 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002190 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 }
2192
2193 BT_DBG("cache %p, copied %d", cache, copied);
2194 return copied;
2195}
2196
Johan Hedberg42c6b122013-03-05 20:37:49 +02002197static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198{
2199 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002200 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 struct hci_cp_inquiry cp;
2202
2203 BT_DBG("%s", hdev->name);
2204
2205 if (test_bit(HCI_INQUIRY, &hdev->flags))
2206 return;
2207
2208 /* Start Inquiry */
2209 memcpy(&cp.lap, &ir->lap, 3);
2210 cp.length = ir->length;
2211 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002212 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
2214
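/* HCIINQUIRY ioctl handler: starts an inquiry if the cache is stale or
 * a flush was requested, waits for it to finish and then copies the
 * cached results back to user space.
 */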
2215int hci_inquiry(void __user *arg)
2216{
2217 __u8 __user *ptr = arg;
2218 struct hci_inquiry_req ir;
2219 struct hci_dev *hdev;
2220 int err = 0, do_inquiry = 0, max_rsp;
2221 long timeo;
2222 __u8 *buf;
2223
2224 if (copy_from_user(&ir, ptr, sizeof(ir)))
2225 return -EFAULT;
2226
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002227 hdev = hci_dev_get(ir.dev_id);
2228 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return -ENODEV;
2230
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002231 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2232 err = -EBUSY;
2233 goto done;
2234 }
2235
Marcel Holtmann4a964402014-07-02 19:10:33 +02002236 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002237 err = -EOPNOTSUPP;
2238 goto done;
2239 }
2240
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002241 if (hdev->dev_type != HCI_BREDR) {
2242 err = -EOPNOTSUPP;
2243 goto done;
2244 }
2245
Johan Hedberg56f87902013-10-02 13:43:13 +03002246 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2247 err = -EOPNOTSUPP;
2248 goto done;
2249 }
2250
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002251 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002252 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002253 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002254 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 do_inquiry = 1;
2256 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002257 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Marcel Holtmann04837f62006-07-03 10:02:33 +02002259 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002260
2261 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002262 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2263 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002264 if (err < 0)
2265 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002266
2267 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2268 * cleared). If it is interrupted by a signal, return -EINTR.
2269 */
NeilBrown74316202014-07-07 15:16:04 +10002270 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002271 TASK_INTERRUPTIBLE))
2272 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002273 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002275	/* For an unlimited number of responses, use a buffer with
2276	 * 255 entries
2277 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2279
2280	/* inquiry_cache_dump() can't sleep, so allocate a temporary
2281	 * buffer and then copy it to user space.
2282 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002283 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002284 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 err = -ENOMEM;
2286 goto done;
2287 }
2288
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002289 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002291 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292
2293 BT_DBG("num_rsp %d", ir.num_rsp);
2294
2295 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2296 ptr += sizeof(ir);
2297 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002298 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002300 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 err = -EFAULT;
2302
2303 kfree(buf);
2304
2305done:
2306 hci_dev_put(hdev);
2307 return err;
2308}
2309
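/* Power on a controller: run the driver's setup callback on the first
 * open, perform either the full or the unconfigured init sequence and
 * unwind all queued work if any step fails.
 */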
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002310static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 int ret = 0;
2313
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 BT_DBG("%s %p", hdev->name, hdev);
2315
2316 hci_req_lock(hdev);
2317
Johan Hovold94324962012-03-15 14:48:41 +01002318 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2319 ret = -ENODEV;
2320 goto done;
2321 }
2322
Marcel Holtmannd603b762014-07-06 12:11:14 +02002323 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2324 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002325 /* Check for rfkill but allow the HCI setup stage to
2326 * proceed (which in itself doesn't cause any RF activity).
2327 */
2328 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2329 ret = -ERFKILL;
2330 goto done;
2331 }
2332
2333 /* Check for valid public address or a configured static
2334	 * random address, but let the HCI setup proceed to
2335 * be able to determine if there is a public address
2336 * or not.
2337 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002338 * In case of user channel usage, it is not important
2339 * if a public address or static random address is
2340 * available.
2341 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002342 * This check is only valid for BR/EDR controllers
2343 * since AMP controllers do not have an address.
2344 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002345 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2346 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002347 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2348 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2349 ret = -EADDRNOTAVAIL;
2350 goto done;
2351 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002352 }
2353
Linus Torvalds1da177e2005-04-16 15:20:36 -07002354 if (test_bit(HCI_UP, &hdev->flags)) {
2355 ret = -EALREADY;
2356 goto done;
2357 }
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 if (hdev->open(hdev)) {
2360 ret = -EIO;
2361 goto done;
2362 }
2363
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002364 atomic_set(&hdev->cmd_cnt, 1);
2365 set_bit(HCI_INIT, &hdev->flags);
2366
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002367 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2368 if (hdev->setup)
2369 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002370
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002371 /* The transport driver can set these quirks before
2372 * creating the HCI device or in its setup callback.
2373 *
2374 * In case any of them is set, the controller has to
2375 * start up as unconfigured.
2376 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002377 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2378 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002379 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002380
2381 /* For an unconfigured controller it is required to
2382 * read at least the version information provided by
2383 * the Read Local Version Information command.
2384 *
2385 * If the set_bdaddr driver callback is provided, then
2386 * also the original Bluetooth public device address
2387 * will be read using the Read BD Address command.
2388 */
2389 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2390 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002391 }
2392
Marcel Holtmann9713c172014-07-06 12:11:15 +02002393 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2394 /* If public address change is configured, ensure that
2395 * the address gets programmed. If the driver does not
2396 * support changing the public address, fail the power
2397 * on procedure.
2398 */
2399 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2400 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002401 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2402 else
2403 ret = -EADDRNOTAVAIL;
2404 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002405
2406 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002407 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002408 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002409 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410 }
2411
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002412 clear_bit(HCI_INIT, &hdev->flags);
2413
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 if (!ret) {
2415 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002416 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 set_bit(HCI_UP, &hdev->flags);
2418 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002419 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002420 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002421 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002422 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002423 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002424 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002425 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002427 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002428 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002430 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002431 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002432 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433
2434 skb_queue_purge(&hdev->cmd_q);
2435 skb_queue_purge(&hdev->rx_q);
2436
2437 if (hdev->flush)
2438 hdev->flush(hdev);
2439
2440 if (hdev->sent_cmd) {
2441 kfree_skb(hdev->sent_cmd);
2442 hdev->sent_cmd = NULL;
2443 }
2444
2445 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002446 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002447 }
2448
2449done:
2450 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002451 return ret;
2452}
2453
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002454/* ---- HCI ioctl helpers ---- */
2455
2456int hci_dev_open(__u16 dev)
2457{
2458 struct hci_dev *hdev;
2459 int err;
2460
2461 hdev = hci_dev_get(dev);
2462 if (!hdev)
2463 return -ENODEV;
2464
Marcel Holtmann4a964402014-07-02 19:10:33 +02002465 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002466 * up as user channel. Trying to bring them up as normal devices
2467 * will result into a failure. Only user channel operation is
2468 * possible.
2469 *
2470 * When this function is called for a user channel, the flag
2471 * HCI_USER_CHANNEL will be set first before attempting to
2472 * open the device.
2473 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002474 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002475 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2476 err = -EOPNOTSUPP;
2477 goto done;
2478 }
2479
Johan Hedberge1d08f42013-10-01 22:44:50 +03002480 /* We need to ensure that no other power on/off work is pending
2481 * before proceeding to call hci_dev_do_open. This is
2482 * particularly important if the setup procedure has not yet
2483 * completed.
2484 */
2485 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2486 cancel_delayed_work(&hdev->power_off);
2487
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002488 /* After this call it is guaranteed that the setup procedure
2489 * has finished. This means that error conditions like RFKILL
2490 * or no valid public or static random address apply.
2491 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002492 flush_workqueue(hdev->req_workqueue);
2493
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002494	/* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002495	 * are brought up via the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002496 * so that pairing works for them. Once the management interface
2497 * is in use this bit will be cleared again and userspace has
2498 * to explicitly enable it.
2499 */
2500 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2501 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002502 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002503
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002504 err = hci_dev_do_open(hdev);
2505
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002506done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002507 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002508 return err;
2509}
2510
Johan Hedbergd7347f32014-07-04 12:37:23 +03002511/* This function requires the caller holds hdev->lock */
2512static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2513{
2514 struct hci_conn_params *p;
2515
Johan Hedbergf161dd42014-08-15 21:06:54 +03002516 list_for_each_entry(p, &hdev->le_conn_params, list) {
2517 if (p->conn) {
2518 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002519 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002520 p->conn = NULL;
2521 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002522 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002523 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002524
2525 BT_DBG("All LE pending actions cleared");
2526}
2527
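/* Power off a controller: flush all pending work, clear the discovery
 * state and open connections, optionally reset the hardware and leave
 * only the persistent flags set.
 */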
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528static int hci_dev_do_close(struct hci_dev *hdev)
2529{
2530 BT_DBG("%s %p", hdev->name, hdev);
2531
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002532 cancel_delayed_work(&hdev->power_off);
2533
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 hci_req_cancel(hdev, ENODEV);
2535 hci_req_lock(hdev);
2536
2537 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002538 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 hci_req_unlock(hdev);
2540 return 0;
2541 }
2542
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002543 /* Flush RX and TX works */
2544 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002545 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002547 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002548 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002549 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002550 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002551 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002552 }
2553
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002554 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002555 cancel_delayed_work(&hdev->service_cache);
2556
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002557 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002558
2559 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2560 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002561
Johan Hedberg76727c02014-11-18 09:00:14 +02002562 /* Avoid potential lockdep warnings from the *_flush() calls by
2563 * ensuring the workqueue is empty up front.
2564 */
2565 drain_workqueue(hdev->workqueue);
2566
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002567 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002568 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002569 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002570 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002571 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572
2573 hci_notify(hdev, HCI_DEV_DOWN);
2574
2575 if (hdev->flush)
2576 hdev->flush(hdev);
2577
2578 /* Reset device */
2579 skb_queue_purge(&hdev->cmd_q);
2580 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002581 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2582 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002583 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002585 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586 clear_bit(HCI_INIT, &hdev->flags);
2587 }
2588
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002589 /* flush cmd work */
2590 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
2592 /* Drop queues */
2593 skb_queue_purge(&hdev->rx_q);
2594 skb_queue_purge(&hdev->cmd_q);
2595 skb_queue_purge(&hdev->raw_q);
2596
2597 /* Drop last sent command */
2598 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002599 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 kfree_skb(hdev->sent_cmd);
2601 hdev->sent_cmd = NULL;
2602 }
2603
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002604 kfree_skb(hdev->recv_evt);
2605 hdev->recv_evt = NULL;
2606
Linus Torvalds1da177e2005-04-16 15:20:36 -07002607 /* After this point our queues are empty
2608	 * and no tasks are scheduled.
	 */
2609 hdev->close(hdev);
2610
Johan Hedberg35b973c2013-03-15 17:06:59 -05002611 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002612 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002613 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2614
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002615 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2616 if (hdev->dev_type == HCI_BREDR) {
2617 hci_dev_lock(hdev);
2618 mgmt_powered(hdev, 0);
2619 hci_dev_unlock(hdev);
2620 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002621 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002622
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002623 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002624 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002625
Johan Hedberge59fda82012-02-22 18:11:53 +02002626 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002627 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002628 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 hci_req_unlock(hdev);
2631
2632 hci_dev_put(hdev);
2633 return 0;
2634}
2635
2636int hci_dev_close(__u16 dev)
2637{
2638 struct hci_dev *hdev;
2639 int err;
2640
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002641 hdev = hci_dev_get(dev);
2642 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002644
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002645 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 err = -EBUSY;
2647 goto done;
2648 }
2649
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002650 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2651 cancel_delayed_work(&hdev->power_off);
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002654
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002655done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 hci_dev_put(hdev);
2657 return err;
2658}
2659
2660int hci_dev_reset(__u16 dev)
2661{
2662 struct hci_dev *hdev;
2663 int ret = 0;
2664
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002665 hdev = hci_dev_get(dev);
2666 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 return -ENODEV;
2668
2669 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
Marcel Holtmann808a0492013-08-26 20:57:58 -07002671 if (!test_bit(HCI_UP, &hdev->flags)) {
2672 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002674 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002676 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2677 ret = -EBUSY;
2678 goto done;
2679 }
2680
Marcel Holtmann4a964402014-07-02 19:10:33 +02002681 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002682 ret = -EOPNOTSUPP;
2683 goto done;
2684 }
2685
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686 /* Drop queues */
2687 skb_queue_purge(&hdev->rx_q);
2688 skb_queue_purge(&hdev->cmd_q);
2689
Johan Hedberg76727c02014-11-18 09:00:14 +02002690 /* Avoid potential lockdep warnings from the *_flush() calls by
2691 * ensuring the workqueue is empty up front.
2692 */
2693 drain_workqueue(hdev->workqueue);
2694
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002695 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002696 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002698 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700 if (hdev->flush)
2701 hdev->flush(hdev);
2702
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002703 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002704	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002706 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707
2708done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709 hci_req_unlock(hdev);
2710 hci_dev_put(hdev);
2711 return ret;
2712}
2713
2714int hci_dev_reset_stat(__u16 dev)
2715{
2716 struct hci_dev *hdev;
2717 int ret = 0;
2718
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002719 hdev = hci_dev_get(dev);
2720 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 return -ENODEV;
2722
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002723 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2724 ret = -EBUSY;
2725 goto done;
2726 }
2727
Marcel Holtmann4a964402014-07-02 19:10:33 +02002728 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002729 ret = -EOPNOTSUPP;
2730 goto done;
2731 }
2732
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2734
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002735done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 return ret;
2738}
2739
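/* Propagate a scan mode change made through the legacy HCISETSCAN ioctl
 * into the HCI_CONNECTABLE and HCI_DISCOVERABLE flags, so that the
 * management interface stays in sync with out-of-band changes.
 */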
Johan Hedberg123abc02014-07-10 12:09:07 +03002740static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2741{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002742 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002743
2744 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2745
2746 if ((scan & SCAN_PAGE))
2747 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2748 &hdev->dev_flags);
2749 else
2750 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2751 &hdev->dev_flags);
2752
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002753 if ((scan & SCAN_INQUIRY)) {
2754 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2755 &hdev->dev_flags);
2756 } else {
2757 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2758 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2759 &hdev->dev_flags);
2760 }
2761
Johan Hedberg123abc02014-07-10 12:09:07 +03002762 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2763 return;
2764
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002765 if (conn_changed || discov_changed) {
2766 /* In case this was disabled through mgmt */
2767 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2768
2769 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2770 mgmt_update_adv_data(hdev);
2771
Johan Hedberg123abc02014-07-10 12:09:07 +03002772 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002773 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002774}
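/* Worked example of the mapping above (illustrative only): a non-mgmt
 * HCISETSCAN with dev_opt = SCAN_PAGE | SCAN_INQUIRY sets both
 * HCI_CONNECTABLE and HCI_DISCOVERABLE, while SCAN_DISABLED clears
 * them along with HCI_LIMITED_DISCOVERABLE. If either flag actually
 * changed and the mgmt interface is in use, the new settings are
 * propagated through mgmt_new_settings().
 */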
2775
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776int hci_dev_cmd(unsigned int cmd, void __user *arg)
2777{
2778 struct hci_dev *hdev;
2779 struct hci_dev_req dr;
2780 int err = 0;
2781
2782 if (copy_from_user(&dr, arg, sizeof(dr)))
2783 return -EFAULT;
2784
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002785 hdev = hci_dev_get(dr.dev_id);
2786 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 return -ENODEV;
2788
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002789 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2790 err = -EBUSY;
2791 goto done;
2792 }
2793
Marcel Holtmann4a964402014-07-02 19:10:33 +02002794 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002795 err = -EOPNOTSUPP;
2796 goto done;
2797 }
2798
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002799 if (hdev->dev_type != HCI_BREDR) {
2800 err = -EOPNOTSUPP;
2801 goto done;
2802 }
2803
Johan Hedberg56f87902013-10-02 13:43:13 +03002804 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2805 err = -EOPNOTSUPP;
2806 goto done;
2807 }
2808
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 switch (cmd) {
2810 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002811 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2812 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 break;
2814
2815 case HCISETENCRYPT:
2816 if (!lmp_encrypt_capable(hdev)) {
2817 err = -EOPNOTSUPP;
2818 break;
2819 }
2820
2821 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2822 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002823 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2824 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 if (err)
2826 break;
2827 }
2828
Johan Hedberg01178cd2013-03-05 20:37:41 +02002829 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2830 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 break;
2832
2833 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002834 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2835 HCI_INIT_TIMEOUT);
Johan Hedberg91a668b2014-07-09 13:28:26 +03002836
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002837 /* Ensure that the connectable and discoverable states
2838 * get correctly modified as this was a non-mgmt change.
Johan Hedberg91a668b2014-07-09 13:28:26 +03002839 */
Johan Hedberg123abc02014-07-10 12:09:07 +03002840 if (!err)
2841 hci_update_scan_state(hdev, dr.dev_opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 break;
2843
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002844 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002845 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2846 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002847 break;
2848
2849 case HCISETLINKMODE:
2850 hdev->link_mode = ((__u16) dr.dev_opt) &
2851 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2852 break;
2853
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 case HCISETPTYPE:
2855 hdev->pkt_type = (__u16) dr.dev_opt;
2856 break;
2857
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002859 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2860 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 break;
2862
2863 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002864 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2865 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002866 break;
2867
2868 default:
2869 err = -EINVAL;
2870 break;
2871 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002872
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002873done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 hci_dev_put(hdev);
2875 return err;
2876}
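/* Illustrative userspace counterpart for the MTU ioctls above: the two
 * __u16 reads unpack dev_opt with the packet count in the lower half
 * and the MTU in the upper half (on a little-endian machine). A
 * hypothetical caller (variable names assumed, not part of this file)
 * could pack it as:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */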
2877
2878int hci_get_dev_list(void __user *arg)
2879{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002880 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 struct hci_dev_list_req *dl;
2882 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 int n = 0, size, err;
2884 __u16 dev_num;
2885
2886 if (get_user(dev_num, (__u16 __user *) arg))
2887 return -EFAULT;
2888
2889 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2890 return -EINVAL;
2891
2892 size = sizeof(*dl) + dev_num * sizeof(*dr);
2893
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002894 dl = kzalloc(size, GFP_KERNEL);
2895 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 return -ENOMEM;
2897
2898 dr = dl->dev_req;
2899
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002900 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002901 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002902 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002903
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002904 		/* When the auto-off is configured, it means the transport
2905 * is running, but in that case still indicate that the
2906 * device is actually down.
2907 */
2908 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2909 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002910
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002912 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002913
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914 if (++n >= dev_num)
2915 break;
2916 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002917 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918
2919 dl->dev_num = n;
2920 size = sizeof(*dl) + n * sizeof(*dr);
2921
2922 err = copy_to_user(arg, dl, size);
2923 kfree(dl);
2924
2925 return err ? -EFAULT : 0;
2926}
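/* A minimal sketch of the HCIGETDEVLIST caller served above (assumed
 * userspace code, for illustration only): reserve room for up to
 * HCI_MAX_DEV entries, set dev_num, and let the kernel fill in the
 * ids and flags:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		... iterate over dl->dev_req[0 .. dl->dev_num - 1] ...
 */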
2927
2928int hci_get_dev_info(void __user *arg)
2929{
2930 struct hci_dev *hdev;
2931 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002932 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 int err = 0;
2934
2935 if (copy_from_user(&di, arg, sizeof(di)))
2936 return -EFAULT;
2937
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002938 hdev = hci_dev_get(di.dev_id);
2939 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return -ENODEV;
2941
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002942 	/* When the auto-off is configured, it means the transport
2943 * is running, but in that case still indicate that the
2944 * device is actually down.
2945 */
2946 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2947 flags = hdev->flags & ~BIT(HCI_UP);
2948 else
2949 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002950
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 strcpy(di.name, hdev->name);
2952 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002953 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002954 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002956 if (lmp_bredr_capable(hdev)) {
2957 di.acl_mtu = hdev->acl_mtu;
2958 di.acl_pkts = hdev->acl_pkts;
2959 di.sco_mtu = hdev->sco_mtu;
2960 di.sco_pkts = hdev->sco_pkts;
2961 } else {
2962 di.acl_mtu = hdev->le_mtu;
2963 di.acl_pkts = hdev->le_pkts;
2964 di.sco_mtu = 0;
2965 di.sco_pkts = 0;
2966 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002967 di.link_policy = hdev->link_policy;
2968 di.link_mode = hdev->link_mode;
2969
2970 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2971 memcpy(&di.features, &hdev->features, sizeof(di.features));
2972
2973 if (copy_to_user(arg, &di, sizeof(di)))
2974 err = -EFAULT;
2975
2976 hci_dev_put(hdev);
2977
2978 return err;
2979}
2980
2981/* ---- Interface to HCI drivers ---- */
2982
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002983static int hci_rfkill_set_block(void *data, bool blocked)
2984{
2985 struct hci_dev *hdev = data;
2986
2987 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2988
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002989 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2990 return -EBUSY;
2991
Johan Hedberg5e130362013-09-13 08:58:17 +03002992 if (blocked) {
2993 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002994 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2995 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002996 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002997 } else {
2998 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002999 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003000
3001 return 0;
3002}
3003
3004static const struct rfkill_ops hci_rfkill_ops = {
3005 .set_block = hci_rfkill_set_block,
3006};
3007
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003008static void hci_power_on(struct work_struct *work)
3009{
3010 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003011 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003012
3013 BT_DBG("%s", hdev->name);
3014
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03003015 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03003016 if (err < 0) {
3017 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003018 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03003019 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003020
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003021 /* During the HCI setup phase, a few error conditions are
3022 * ignored and they need to be checked now. If they are still
3023 * valid, it is important to turn the device back off.
3024 */
3025 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
Marcel Holtmann4a964402014-07-02 19:10:33 +02003026 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07003027 (hdev->dev_type == HCI_BREDR &&
3028 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3029 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03003030 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3031 hci_dev_do_close(hdev);
3032 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02003033 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3034 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03003035 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003036
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003037 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02003038 /* For unconfigured devices, set the HCI_RAW flag
3039 * so that userspace can easily identify them.
Marcel Holtmann4a964402014-07-02 19:10:33 +02003040 */
3041 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3042 set_bit(HCI_RAW, &hdev->flags);
Marcel Holtmann0602a8a2014-07-02 21:30:54 +02003043
3044 /* For fully configured devices, this will send
3045 * the Index Added event. For unconfigured devices,
3046 		 * it will send the Unconfigured Index Added event.
3047 *
3048 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3049 		 * and no event will be sent.
3050 */
Johan Hedberg744cf192011-11-08 20:40:14 +02003051 mgmt_index_added(hdev);
Marcel Holtmannd603b762014-07-06 12:11:14 +02003052 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmann5ea234d2014-07-06 12:11:16 +02003053 		/* Once the controller is configured, it is
3054 		 * important to clear the HCI_RAW flag.
3055 */
3056 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3057 clear_bit(HCI_RAW, &hdev->flags);
3058
Marcel Holtmannd603b762014-07-06 12:11:14 +02003059 /* Powering on the controller with HCI_CONFIG set only
3060 * happens with the transition from unconfigured to
3061 * configured. This will send the Index Added event.
3062 */
3063 mgmt_index_added(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003064 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003065}
3066
3067static void hci_power_off(struct work_struct *work)
3068{
Johan Hedberg32435532011-11-07 22:16:04 +02003069 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003070 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003071
3072 BT_DBG("%s", hdev->name);
3073
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003074 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003075}
3076
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003077static void hci_discov_off(struct work_struct *work)
3078{
3079 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003080
3081 hdev = container_of(work, struct hci_dev, discov_off.work);
3082
3083 BT_DBG("%s", hdev->name);
3084
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003085 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003086}
3087
Johan Hedberg35f74982014-02-18 17:14:32 +02003088void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003089{
Johan Hedberg48210022013-01-27 00:31:28 +02003090 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003091
Johan Hedberg48210022013-01-27 00:31:28 +02003092 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3093 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003094 kfree(uuid);
3095 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003096}
3097
Johan Hedberg35f74982014-02-18 17:14:32 +02003098void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003099{
Johan Hedberg0378b592014-11-19 15:22:22 +02003100 struct link_key *key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003101
Johan Hedberg0378b592014-11-19 15:22:22 +02003102 list_for_each_entry_rcu(key, &hdev->link_keys, list) {
3103 list_del_rcu(&key->list);
3104 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003105 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003106}
3107
Johan Hedberg35f74982014-02-18 17:14:32 +02003108void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003109{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003110 struct smp_ltk *k;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003111
Johan Hedberg970d0f12014-11-13 14:37:47 +02003112 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3113 list_del_rcu(&k->list);
3114 kfree_rcu(k, rcu);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003115 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003116}
3117
Johan Hedberg970c4e42014-02-18 10:19:33 +02003118void hci_smp_irks_clear(struct hci_dev *hdev)
3119{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003120 struct smp_irk *k;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003121
Johan Hedbergadae20c2014-11-13 14:37:48 +02003122 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3123 list_del_rcu(&k->list);
3124 kfree_rcu(k, rcu);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003125 }
3126}
3127
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3129{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003130 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003131
Johan Hedberg0378b592014-11-19 15:22:22 +02003132 rcu_read_lock();
3133 list_for_each_entry_rcu(k, &hdev->link_keys, list) {
3134 if (bacmp(bdaddr, &k->bdaddr) == 0) {
3135 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003136 return k;
Johan Hedberg0378b592014-11-19 15:22:22 +02003137 }
3138 }
3139 rcu_read_unlock();
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003140
3141 return NULL;
3142}
3143
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303144static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003145 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003146{
3147 /* Legacy key */
3148 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303149 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003150
3151 /* Debug keys are insecure so don't store them persistently */
3152 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303153 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003154
3155 /* Changed combination key and there's no previous one */
3156 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303157 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003158
3159 /* Security mode 3 case */
3160 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303161 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003162
3163 	/* Neither the local nor the remote side had no-bonding as a requirement */
3164 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303165 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003166
3167 /* Local side had dedicated bonding as requirement */
3168 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303169 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003170
3171 /* Remote side had dedicated bonding as requirement */
3172 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303173 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003174
3175 /* If none of the above criteria match, then don't store the key
3176 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303177 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003178}
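/* Worked example of the rules above (illustrative): an unauthenticated
 * SSP combination key (type 0x04) from a connection where either side
 * required dedicated bonding (auth_type 0x02 or 0x03) is stored
 * persistently, whereas the same key type with both sides at
 * no-bonding (0x00/0x01) on an existing connection is treated as
 * non-persistent.
 */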
3179
Johan Hedberge804d252014-07-16 11:42:28 +03003180static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003181{
Johan Hedberge804d252014-07-16 11:42:28 +03003182 if (type == SMP_LTK)
3183 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003184
Johan Hedberge804d252014-07-16 11:42:28 +03003185 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003186}
3187
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003188struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3189 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003190{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003191 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003192
Johan Hedberg970d0f12014-11-13 14:37:47 +02003193 rcu_read_lock();
3194 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberg5378bc52014-05-29 14:00:39 +03003195 if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
3196 continue;
3197
3198 if (smp_ltk_is_sc(k)) {
3199 if (k->type == SMP_LTK_P256_DEBUG &&
3200 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags))
3201 continue;
3202 rcu_read_unlock();
3203 return k;
3204 }
3205
3206 if (ltk_role(k->type) == role) {
Johan Hedberg970d0f12014-11-13 14:37:47 +02003207 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003208 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003209 }
3210 }
3211 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003212
3213 return NULL;
3214}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003215
Johan Hedberg970c4e42014-02-18 10:19:33 +02003216struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3217{
3218 struct smp_irk *irk;
3219
Johan Hedbergadae20c2014-11-13 14:37:48 +02003220 rcu_read_lock();
3221 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3222 if (!bacmp(&irk->rpa, rpa)) {
3223 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003224 return irk;
3225 }
3226 }
3227
Johan Hedbergadae20c2014-11-13 14:37:48 +02003228 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3229 if (smp_irk_matches(hdev, irk->val, rpa)) {
3230 bacpy(&irk->rpa, rpa);
3231 rcu_read_unlock();
3232 return irk;
3233 }
3234 }
3235 rcu_read_unlock();
3236
Johan Hedberg970c4e42014-02-18 10:19:33 +02003237 return NULL;
3238}
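/* The lookup above is two-pass on purpose: first try to match an
 * already cached RPA cheaply, then fall back to smp_irk_matches(),
 * which runs the cryptographic resolution against every stored IRK
 * and caches the RPA on a hit so the next lookup takes the fast path.
 */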
3239
3240struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3241 u8 addr_type)
3242{
3243 struct smp_irk *irk;
3244
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003245 /* Identity Address must be public or static random */
3246 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3247 return NULL;
3248
Johan Hedbergadae20c2014-11-13 14:37:48 +02003249 rcu_read_lock();
3250 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
Johan Hedberg970c4e42014-02-18 10:19:33 +02003251 if (addr_type == irk->addr_type &&
Johan Hedbergadae20c2014-11-13 14:37:48 +02003252 bacmp(bdaddr, &irk->bdaddr) == 0) {
3253 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003254 return irk;
Johan Hedbergadae20c2014-11-13 14:37:48 +02003255 }
Johan Hedberg970c4e42014-02-18 10:19:33 +02003256 }
Johan Hedbergadae20c2014-11-13 14:37:48 +02003257 rcu_read_unlock();
Johan Hedberg970c4e42014-02-18 10:19:33 +02003258
3259 return NULL;
3260}
3261
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003262struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03003263 bdaddr_t *bdaddr, u8 *val, u8 type,
3264 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003265{
3266 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303267 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003268
3269 old_key = hci_find_link_key(hdev, bdaddr);
3270 if (old_key) {
3271 old_key_type = old_key->type;
3272 key = old_key;
3273 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07003274 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003275 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003276 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003277 return NULL;
Johan Hedberg0378b592014-11-19 15:22:22 +02003278 list_add_rcu(&key->list, &hdev->link_keys);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003279 }
3280
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003281 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003282
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003283 /* Some buggy controller combinations generate a changed
3284 * combination key for legacy pairing even when there's no
3285 * previous key */
3286 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003287 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003288 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003289 if (conn)
3290 conn->key_type = type;
3291 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003292
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003293 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003294 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003295 key->pin_len = pin_len;
3296
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003297 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003298 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003299 else
3300 key->type = type;
3301
Johan Hedberg7652ff62014-06-24 13:15:49 +03003302 if (persistent)
3303 *persistent = hci_persistent_key(hdev, conn, type,
3304 old_key_type);
Johan Hedberg4df378a2011-04-28 11:29:03 -07003305
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003306 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003307}
3308
Johan Hedbergca9142b2014-02-19 14:57:44 +02003309struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003310 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003311 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003312{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003313 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003314 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003315
Johan Hedbergf3a73d92014-05-29 15:02:59 +03003316 old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003317 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003318 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003319 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003320 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003321 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003322 return NULL;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003323 list_add_rcu(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003324 }
3325
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003326 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003327 key->bdaddr_type = addr_type;
3328 memcpy(key->val, tk, sizeof(key->val));
3329 key->authenticated = authenticated;
3330 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003331 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003332 key->enc_size = enc_size;
3333 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003334
Johan Hedbergca9142b2014-02-19 14:57:44 +02003335 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003336}
3337
Johan Hedbergca9142b2014-02-19 14:57:44 +02003338struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3339 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003340{
3341 struct smp_irk *irk;
3342
3343 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3344 if (!irk) {
3345 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3346 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003347 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003348
3349 bacpy(&irk->bdaddr, bdaddr);
3350 irk->addr_type = addr_type;
3351
Johan Hedbergadae20c2014-11-13 14:37:48 +02003352 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003353 }
3354
3355 memcpy(irk->val, val, 16);
3356 bacpy(&irk->rpa, rpa);
3357
Johan Hedbergca9142b2014-02-19 14:57:44 +02003358 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003359}
3360
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003361int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3362{
3363 struct link_key *key;
3364
3365 key = hci_find_link_key(hdev, bdaddr);
3366 if (!key)
3367 return -ENOENT;
3368
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003369 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003370
Johan Hedberg0378b592014-11-19 15:22:22 +02003371 list_del_rcu(&key->list);
3372 kfree_rcu(key, rcu);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003373
3374 return 0;
3375}
3376
Johan Hedberge0b2b272014-02-18 17:14:31 +02003377int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003378{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003379 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003380 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003381
Johan Hedberg970d0f12014-11-13 14:37:47 +02003382 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003383 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003384 continue;
3385
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003386 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003387
Johan Hedberg970d0f12014-11-13 14:37:47 +02003388 list_del_rcu(&k->list);
3389 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003390 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003391 }
3392
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003393 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003394}
3395
Johan Hedberga7ec7332014-02-18 17:14:35 +02003396void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3397{
Johan Hedbergadae20c2014-11-13 14:37:48 +02003398 struct smp_irk *k;
Johan Hedberga7ec7332014-02-18 17:14:35 +02003399
Johan Hedbergadae20c2014-11-13 14:37:48 +02003400 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003401 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3402 continue;
3403
3404 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3405
Johan Hedbergadae20c2014-11-13 14:37:48 +02003406 list_del_rcu(&k->list);
3407 kfree_rcu(k, rcu);
Johan Hedberga7ec7332014-02-18 17:14:35 +02003408 }
3409}
3410
Ville Tervo6bd32322011-02-16 16:32:41 +02003411/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003412static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003413{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003414 struct hci_dev *hdev = container_of(work, struct hci_dev,
3415 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003416
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003417 if (hdev->sent_cmd) {
3418 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3419 u16 opcode = __le16_to_cpu(sent->opcode);
3420
3421 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3422 } else {
3423 BT_ERR("%s command tx timeout", hdev->name);
3424 }
3425
Ville Tervo6bd32322011-02-16 16:32:41 +02003426 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003427 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003428}
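/* Recovery sketch: after a command timeout the credit count is forced
 * back to 1 so that hci_cmd_work() can be scheduled again and make
 * progress with the next queued command instead of stalling forever
 * on a completion event that will never arrive.
 */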
3429
Szymon Janc2763eda2011-03-22 13:12:22 +01003430struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003431 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003432{
3433 struct oob_data *data;
3434
3435 list_for_each_entry(data, &hdev->remote_oob_data, list)
3436 if (bacmp(bdaddr, &data->bdaddr) == 0)
3437 return data;
3438
3439 return NULL;
3440}
3441
3442int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3443{
3444 struct oob_data *data;
3445
3446 data = hci_find_remote_oob_data(hdev, bdaddr);
3447 if (!data)
3448 return -ENOENT;
3449
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003450 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003451
3452 list_del(&data->list);
3453 kfree(data);
3454
3455 return 0;
3456}
3457
Johan Hedberg35f74982014-02-18 17:14:32 +02003458void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003459{
3460 struct oob_data *data, *n;
3461
3462 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3463 list_del(&data->list);
3464 kfree(data);
3465 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003466}
3467
Marcel Holtmann07988722014-01-10 02:07:29 -08003468int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003469 u8 *hash, u8 *rand)
Szymon Janc2763eda2011-03-22 13:12:22 +01003470{
3471 struct oob_data *data;
3472
3473 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003474 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003475 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003476 if (!data)
3477 return -ENOMEM;
3478
3479 bacpy(&data->bdaddr, bdaddr);
3480 list_add(&data->list, &hdev->remote_oob_data);
3481 }
3482
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003483 memcpy(data->hash192, hash, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003484 memcpy(data->rand192, rand, sizeof(data->rand192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003485
Marcel Holtmann07988722014-01-10 02:07:29 -08003486 memset(data->hash256, 0, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003487 memset(data->rand256, 0, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003488
3489 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3490
3491 return 0;
3492}
3493
3494int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg38da1702014-11-17 20:52:20 +02003495 u8 *hash192, u8 *rand192,
3496 u8 *hash256, u8 *rand256)
Marcel Holtmann07988722014-01-10 02:07:29 -08003497{
3498 struct oob_data *data;
3499
3500 data = hci_find_remote_oob_data(hdev, bdaddr);
3501 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003502 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003503 if (!data)
3504 return -ENOMEM;
3505
3506 bacpy(&data->bdaddr, bdaddr);
3507 list_add(&data->list, &hdev->remote_oob_data);
3508 }
3509
3510 memcpy(data->hash192, hash192, sizeof(data->hash192));
Johan Hedberg38da1702014-11-17 20:52:20 +02003511 memcpy(data->rand192, rand192, sizeof(data->rand192));
Marcel Holtmann07988722014-01-10 02:07:29 -08003512
3513 memcpy(data->hash256, hash256, sizeof(data->hash256));
Johan Hedberg38da1702014-11-17 20:52:20 +02003514 memcpy(data->rand256, rand256, sizeof(data->rand256));
Marcel Holtmann07988722014-01-10 02:07:29 -08003515
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003516 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003517
3518 return 0;
3519}
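/* The two value sets stored above map to the two pairing flavours:
 * hash192/rand192 belong to P-192 based legacy Secure Simple Pairing,
 * hash256/rand256 to P-256 based Secure Connections. The plain
 * hci_add_remote_oob_data() variant only receives the 192-bit values
 * and therefore zeroes out the 256-bit set.
 */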
3520
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003521struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003522 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003523{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003524 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003525
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003526 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003527 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003528 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003529 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003530
3531 return NULL;
3532}
3533
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003534void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003535{
3536 struct list_head *p, *n;
3537
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003538 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003539 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003540
3541 list_del(p);
3542 kfree(b);
3543 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544}
3545
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003546int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003547{
3548 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003549
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003550 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003551 return -EBADF;
3552
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003553 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003554 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003555
Johan Hedberg27f70f32014-07-21 10:50:06 +03003556 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003557 if (!entry)
3558 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003559
3560 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003561 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003562
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003563 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003564
3565 return 0;
3566}
3567
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003568int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003569{
3570 struct bdaddr_list *entry;
3571
Johan Hedberg35f74982014-02-18 17:14:32 +02003572 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003573 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003574 return 0;
3575 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003576
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003577 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003578 if (!entry)
3579 return -ENOENT;
3580
3581 list_del(&entry->list);
3582 kfree(entry);
3583
3584 return 0;
3585}
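/* Typical use of the list helpers above (a sketch; callers normally
 * hold hdev->lock):
 *
 *	err = hci_bdaddr_list_add(&hdev->whitelist, &bdaddr,
 *				  ADDR_LE_DEV_PUBLIC);
 *	if (err == -EEXIST)
 *		...entry was already present...
 *
 * Note that passing BDADDR_ANY to hci_bdaddr_list_del() clears the
 * whole list instead of failing with -ENOENT.
 */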
3586
Andre Guedes15819a72014-02-03 13:56:18 -03003587/* This function requires the caller holds hdev->lock */
3588struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3589 bdaddr_t *addr, u8 addr_type)
3590{
3591 struct hci_conn_params *params;
3592
Johan Hedberg738f6182014-07-03 19:33:51 +03003593 /* The conn params list only contains identity addresses */
3594 if (!hci_is_identity_address(addr, addr_type))
3595 return NULL;
3596
Andre Guedes15819a72014-02-03 13:56:18 -03003597 list_for_each_entry(params, &hdev->le_conn_params, list) {
3598 if (bacmp(&params->addr, addr) == 0 &&
3599 params->addr_type == addr_type) {
3600 return params;
3601 }
3602 }
3603
3604 return NULL;
3605}
3606
Andre Guedescef952c2014-02-26 20:21:49 -03003607static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3608{
3609 struct hci_conn *conn;
3610
3611 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3612 if (!conn)
3613 return false;
3614
3615 if (conn->dst_type != type)
3616 return false;
3617
3618 if (conn->state != BT_CONNECTED)
3619 return false;
3620
3621 return true;
3622}
3623
Andre Guedes15819a72014-02-03 13:56:18 -03003624/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003625struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3626 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003627{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003628 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003629
Johan Hedberg738f6182014-07-03 19:33:51 +03003630 /* The list only contains identity addresses */
3631 if (!hci_is_identity_address(addr, addr_type))
3632 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003633
Johan Hedberg501f8822014-07-04 12:37:26 +03003634 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003635 if (bacmp(&param->addr, addr) == 0 &&
3636 param->addr_type == addr_type)
3637 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003638 }
3639
3640 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003641}
3642
3643/* This function requires the caller holds hdev->lock */
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003644struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3645 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003646{
3647 struct hci_conn_params *params;
3648
Johan Hedbergc46245b2014-07-02 17:37:33 +03003649 if (!hci_is_identity_address(addr, addr_type))
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003650 return NULL;
Andre Guedesa9b0a042014-02-26 20:21:52 -03003651
Andre Guedes15819a72014-02-03 13:56:18 -03003652 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003653 if (params)
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003654 return params;
Andre Guedes15819a72014-02-03 13:56:18 -03003655
3656 params = kzalloc(sizeof(*params), GFP_KERNEL);
3657 if (!params) {
3658 BT_ERR("Out of memory");
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003659 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003660 }
3661
3662 bacpy(&params->addr, addr);
3663 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003664
3665 list_add(&params->list, &hdev->le_conn_params);
Johan Hedberg93450c72014-07-04 12:37:17 +03003666 INIT_LIST_HEAD(&params->action);
Andre Guedescef952c2014-02-26 20:21:49 -03003667
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003668 params->conn_min_interval = hdev->le_conn_min_interval;
3669 params->conn_max_interval = hdev->le_conn_max_interval;
3670 params->conn_latency = hdev->le_conn_latency;
3671 params->supervision_timeout = hdev->le_supv_timeout;
3672 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3673
3674 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3675
Marcel Holtmann51d167c2014-07-01 12:11:04 +02003676 return params;
Marcel Holtmannbf5b3c82014-06-30 12:34:39 +02003677}
3678
3679/* This function requires the caller holds hdev->lock */
3680int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003681 u8 auto_connect)
Andre Guedes15819a72014-02-03 13:56:18 -03003682{
3683 struct hci_conn_params *params;
3684
Marcel Holtmann8c87aae2014-07-01 12:11:05 +02003685 params = hci_conn_params_add(hdev, addr, addr_type);
3686 if (!params)
3687 return -EIO;
Andre Guedes15819a72014-02-03 13:56:18 -03003688
Johan Hedberg42ce26d2014-07-04 12:37:20 +03003689 if (params->auto_connect == auto_connect)
3690 return 0;
3691
Johan Hedberg95305ba2014-07-04 12:37:21 +03003692 list_del_init(&params->action);
Andre Guedes15819a72014-02-03 13:56:18 -03003693
Andre Guedescef952c2014-02-26 20:21:49 -03003694 switch (auto_connect) {
3695 case HCI_AUTO_CONN_DISABLED:
3696 case HCI_AUTO_CONN_LINK_LOSS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003697 hci_update_background_scan(hdev);
Andre Guedescef952c2014-02-26 20:21:49 -03003698 break;
Johan Hedberg851efca2014-07-02 22:42:00 +03003699 case HCI_AUTO_CONN_REPORT:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003700 list_add(&params->action, &hdev->pend_le_reports);
3701 hci_update_background_scan(hdev);
Johan Hedberg851efca2014-07-02 22:42:00 +03003702 break;
Marcel Holtmann4b9e7e72014-07-23 21:55:23 +02003703 case HCI_AUTO_CONN_DIRECT:
Andre Guedescef952c2014-02-26 20:21:49 -03003704 case HCI_AUTO_CONN_ALWAYS:
Johan Hedberg95305ba2014-07-04 12:37:21 +03003705 if (!is_connected(hdev, addr, addr_type)) {
3706 list_add(&params->action, &hdev->pend_le_conns);
3707 hci_update_background_scan(hdev);
3708 }
Andre Guedescef952c2014-02-26 20:21:49 -03003709 break;
3710 }
Andre Guedes15819a72014-02-03 13:56:18 -03003711
Johan Hedberg851efca2014-07-02 22:42:00 +03003712 params->auto_connect = auto_connect;
3713
Marcel Holtmannd06b50c2014-07-01 12:11:06 +02003714 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3715 auto_connect);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003716
3717 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003718}
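/* Summary of the auto_connect policies handled above:
 *
 *   HCI_AUTO_CONN_DISABLED  - no pending list entry; the background
 *                             scan is updated and may stop
 *   HCI_AUTO_CONN_LINK_LOSS - no pending list entry either; re-connection
 *                             is dealt with at link-loss time elsewhere
 *   HCI_AUTO_CONN_REPORT    - queued on pend_le_reports so the device
 *                             is reported to mgmt when seen
 *   HCI_AUTO_CONN_DIRECT and
 *   HCI_AUTO_CONN_ALWAYS    - queued on pend_le_conns (unless already
 *                             connected) for automatic connection
 */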
3719
Johan Hedbergf6c63242014-08-15 21:06:59 +03003720static void hci_conn_params_free(struct hci_conn_params *params)
3721{
3722 if (params->conn) {
3723 hci_conn_drop(params->conn);
3724 hci_conn_put(params->conn);
3725 }
3726
3727 list_del(&params->action);
3728 list_del(&params->list);
3729 kfree(params);
3730}
3731
Andre Guedes15819a72014-02-03 13:56:18 -03003732/* This function requires the caller holds hdev->lock */
3733void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3734{
3735 struct hci_conn_params *params;
3736
3737 params = hci_conn_params_lookup(hdev, addr, addr_type);
3738 if (!params)
3739 return;
3740
Johan Hedbergf6c63242014-08-15 21:06:59 +03003741 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003742
Johan Hedberg95305ba2014-07-04 12:37:21 +03003743 hci_update_background_scan(hdev);
3744
Andre Guedes15819a72014-02-03 13:56:18 -03003745 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3746}
3747
3748/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003749void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003750{
3751 struct hci_conn_params *params, *tmp;
3752
3753 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003754 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3755 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003756 list_del(&params->list);
3757 kfree(params);
3758 }
3759
Johan Hedberg55af49a2014-07-02 17:37:26 +03003760 BT_DBG("All LE disabled connection parameters were removed");
3761}
3762
3763/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003764void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003765{
3766 struct hci_conn_params *params, *tmp;
3767
Johan Hedbergf6c63242014-08-15 21:06:59 +03003768 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3769 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003770
Johan Hedberga2f41a82014-07-04 12:37:19 +03003771 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003772
Andre Guedes15819a72014-02-03 13:56:18 -03003773 BT_DBG("All LE connection parameters were removed");
3774}
3775
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003776static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003777{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003778 if (status) {
3779 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003780
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003781 hci_dev_lock(hdev);
3782 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3783 hci_dev_unlock(hdev);
3784 return;
3785 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003786}
3787
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003788static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003789{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003790 /* General inquiry access code (GIAC) */
3791 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3792 struct hci_request req;
3793 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003794 int err;
3795
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003796 if (status) {
3797 BT_ERR("Failed to disable LE scanning: status %d", status);
3798 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003799 }
3800
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003801 switch (hdev->discovery.type) {
3802 case DISCOV_TYPE_LE:
3803 hci_dev_lock(hdev);
3804 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3805 hci_dev_unlock(hdev);
3806 break;
3807
3808 case DISCOV_TYPE_INTERLEAVED:
3809 hci_req_init(&req, hdev);
3810
3811 memset(&cp, 0, sizeof(cp));
3812 memcpy(&cp.lap, lap, sizeof(cp.lap));
3813 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3814 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3815
3816 hci_dev_lock(hdev);
3817
3818 hci_inquiry_cache_flush(hdev);
3819
3820 err = hci_req_run(&req, inquiry_complete);
3821 if (err) {
3822 BT_ERR("Inquiry request failed: err %d", err);
3823 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3824 }
3825
3826 hci_dev_unlock(hdev);
3827 break;
3828 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003829}
3830
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003831static void le_scan_disable_work(struct work_struct *work)
3832{
3833 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003834 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003835 struct hci_request req;
3836 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003837
3838 BT_DBG("%s", hdev->name);
3839
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003840 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003841
Andre Guedesb1efcc22014-02-26 20:21:40 -03003842 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003843
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003844 err = hci_req_run(&req, le_scan_disable_work_complete);
3845 if (err)
3846 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003847}
3848
Johan Hedberg8d972502014-02-28 12:54:14 +02003849static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3850{
3851 struct hci_dev *hdev = req->hdev;
3852
3853 /* If we're advertising or initiating an LE connection we can't
3854 * go ahead and change the random address at this time. This is
3855 * because the eventual initiator address used for the
3856 * subsequently created connection will be undefined (some
3857 * controllers use the new address and others the one we had
3858 * when the operation started).
3859 *
3860 * In this kind of scenario skip the update and let the random
3861 * address be updated at the next cycle.
3862 */
Johan Hedberg5ce194c2014-07-08 15:07:49 +03003863 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
Johan Hedberg8d972502014-02-28 12:54:14 +02003864 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3865 BT_DBG("Deferring random address update");
Johan Hedberg9a783a12014-09-12 09:31:52 -07003866 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Johan Hedberg8d972502014-02-28 12:54:14 +02003867 return;
3868 }
3869
3870 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3871}
3872
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003873int hci_update_random_address(struct hci_request *req, bool require_privacy,
3874 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003875{
3876 struct hci_dev *hdev = req->hdev;
3877 int err;
3878
3879 	/* If privacy is enabled, use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003880 	 * the current RPA has expired or something other than the
3881 	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003882 */
3883 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003884 int to;
3885
3886 *own_addr_type = ADDR_LE_DEV_RANDOM;
3887
3888 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003889 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003890 return 0;
3891
Johan Hedbergdefce9e2014-08-08 09:37:17 +03003892 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003893 if (err < 0) {
3894 BT_ERR("%s failed to generate new RPA", hdev->name);
3895 return err;
3896 }
3897
Johan Hedberg8d972502014-02-28 12:54:14 +02003898 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003899
3900 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3901 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3902
3903 return 0;
3904 }
3905
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003906 /* In case of required privacy without resolvable private address,
3907 * use an unresolvable private address. This is useful for active
3908 * scanning and non-connectable advertising.
3909 */
3910 if (require_privacy) {
3911 bdaddr_t urpa;
3912
3913 get_random_bytes(&urpa, 6);
3914 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3915
3916 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003917 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003918 return 0;
3919 }
3920
Johan Hedbergebd3a742014-02-23 19:42:21 +02003921 	/* If forcing static address is in use or there is no public
3922 	 * address, use the static address as the random address (but
3923 	 * skip the HCI command if the current random address is already
3924 	 * the static one).
3925 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003926 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003927 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3928 *own_addr_type = ADDR_LE_DEV_RANDOM;
3929 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3930 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3931 &hdev->static_addr);
3932 return 0;
3933 }
3934
3935 /* Neither privacy nor static address is being used so use a
3936 * public address.
3937 */
3938 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3939
3940 return 0;
3941}
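/* For reference, the LE random address sub-types handled above are
 * distinguished by the two most significant bits of the last octet
 * (Bluetooth Core Spec, Vol 6, Part B, Section 1.3):
 *
 *   0b00xxxxxx - non-resolvable private address (the 0x3f mask)
 *   0b01xxxxxx - resolvable private address (RPA)
 *   0b11xxxxxx - static random address (the 0xc0 check in
 *                hci_find_irk_by_addr)
 */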
3942
Johan Hedberga1f4c312014-02-27 14:05:41 +02003943/* Copy the Identity Address of the controller.
3944 *
3945 * If the controller has a public BD_ADDR, then by default use that one.
3946  * If this is an LE-only controller without a public address, default to
3947 * the static random address.
3948 *
3949 * For debugging purposes it is possible to force controllers with a
3950 * public address to use the static random address instead.
3951 */
3952void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3953 u8 *bdaddr_type)
3954{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003955 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003956 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3957 bacpy(bdaddr, &hdev->static_addr);
3958 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3959 } else {
3960 bacpy(bdaddr, &hdev->bdaddr);
3961 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3962 }
3963}
3964
David Herrmann9be0dab2012-04-22 14:39:57 +02003965/* Alloc HCI device */
3966struct hci_dev *hci_alloc_dev(void)
3967{
3968 struct hci_dev *hdev;
3969
Johan Hedberg27f70f32014-07-21 10:50:06 +03003970 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
David Herrmann9be0dab2012-04-22 14:39:57 +02003971 if (!hdev)
3972 return NULL;
3973
David Herrmannb1b813d2012-04-22 14:39:58 +02003974 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3975 hdev->esco_type = (ESCO_HV1);
3976 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003977 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3978 hdev->io_capability = 0x03; /* No Input No Output */
Marcel Holtmann96c21032014-07-02 11:30:51 +02003979 hdev->manufacturer = 0xffff; /* Default to internal use */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003980 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3981 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003982
David Herrmannb1b813d2012-04-22 14:39:58 +02003983 hdev->sniff_max_interval = 800;
3984 hdev->sniff_min_interval = 80;
3985
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003986 hdev->le_adv_channel_map = 0x07;
Georg Lukas628531c2014-07-26 13:59:57 +02003987 hdev->le_adv_min_interval = 0x0800;
3988 hdev->le_adv_max_interval = 0x0800;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003989 hdev->le_scan_interval = 0x0060;
3990 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003991 hdev->le_conn_min_interval = 0x0028;
3992 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003993 hdev->le_conn_latency = 0x0000;
3994 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003995
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003996 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003997 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003998 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3999 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02004000
David Herrmannb1b813d2012-04-22 14:39:58 +02004001 mutex_init(&hdev->lock);
4002 mutex_init(&hdev->req_lock);
4003
4004 INIT_LIST_HEAD(&hdev->mgmt_pending);
4005 INIT_LIST_HEAD(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004006 INIT_LIST_HEAD(&hdev->whitelist);
David Herrmannb1b813d2012-04-22 14:39:58 +02004007 INIT_LIST_HEAD(&hdev->uuids);
4008 INIT_LIST_HEAD(&hdev->link_keys);
4009 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004010 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02004011 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004012 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03004013 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03004014 INIT_LIST_HEAD(&hdev->pend_le_conns);
Johan Hedberg66f84552014-07-04 12:37:18 +03004015 INIT_LIST_HEAD(&hdev->pend_le_reports);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03004016 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02004017
4018 INIT_WORK(&hdev->rx_work, hci_rx_work);
4019 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4020 INIT_WORK(&hdev->tx_work, hci_tx_work);
4021 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02004022
David Herrmannb1b813d2012-04-22 14:39:58 +02004023 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4024 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4025 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4026
David Herrmannb1b813d2012-04-22 14:39:58 +02004027 skb_queue_head_init(&hdev->rx_q);
4028 skb_queue_head_init(&hdev->cmd_q);
4029 skb_queue_head_init(&hdev->raw_q);
4030
4031 init_waitqueue_head(&hdev->req_wait_q);
4032
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02004033 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02004034
David Herrmannb1b813d2012-04-22 14:39:58 +02004035 hci_init_sysfs(hdev);
4036 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02004037
4038 return hdev;
4039}
4040EXPORT_SYMBOL(hci_alloc_dev);
4041
4042/* Free HCI device */
4043void hci_free_dev(struct hci_dev *hdev)
4044{
David Herrmann9be0dab2012-04-22 14:39:57 +02004045 /* will be freed via the device release callback */
4046 put_device(&hdev->dev);
4047}
4048EXPORT_SYMBOL(hci_free_dev);
4049
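/* Illustrative sketch (not part of the original file): a caller may
 * override the defaults initialized above between hci_alloc_dev() and
 * hci_register_dev(). The 0x01 map below restricts LE advertising to
 * channel 37 only; hci_free_dev() is the matching teardown for a
 * device that is never registered. The example_* name is hypothetical.
 */
static struct hci_dev *example_alloc(void)
{
	struct hci_dev *hdev;

	hdev = hci_alloc_dev();
	if (!hdev)
		return NULL;

	/* Override one default from hci_alloc_dev() */
	hdev->le_adv_channel_map = 0x01;

	return hdev;
}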
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050/* Register HCI device */
4051int hci_register_dev(struct hci_dev *hdev)
4052{
David Herrmannb1b813d2012-04-22 14:39:58 +02004053 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
Marcel Holtmann74292d52014-07-06 15:50:27 +02004055 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056 return -EINVAL;
4057
Mat Martineau08add512011-11-02 16:18:36 -07004058 /* Do not allow HCI_AMP devices to register at index 0,
4059 * so the index can be used as the AMP controller ID.
4060 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004061 switch (hdev->dev_type) {
4062 case HCI_BREDR:
4063 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4064 break;
4065 case HCI_AMP:
4066 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4067 break;
4068 default:
4069 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004071
Sasha Levin3df92b32012-05-27 22:36:56 +02004072 if (id < 0)
4073 return id;
4074
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 sprintf(hdev->name, "hci%d", id);
4076 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004077
4078 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4079
Kees Cookd8537542013-07-03 15:04:57 -07004080 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4081 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004082 if (!hdev->workqueue) {
4083 error = -ENOMEM;
4084 goto err;
4085 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004086
Kees Cookd8537542013-07-03 15:04:57 -07004087 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4088 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004089 if (!hdev->req_workqueue) {
4090 destroy_workqueue(hdev->workqueue);
4091 error = -ENOMEM;
4092 goto err;
4093 }
4094
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004095 if (!IS_ERR_OR_NULL(bt_debugfs))
4096 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4097
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004098 dev_set_name(&hdev->dev, "%s", hdev->name);
4099
4100 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004101 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004102 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004104 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004105 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4106 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004107 if (hdev->rfkill) {
4108 if (rfkill_register(hdev->rfkill) < 0) {
4109 rfkill_destroy(hdev->rfkill);
4110 hdev->rfkill = NULL;
4111 }
4112 }
4113
Johan Hedberg5e130362013-09-13 08:58:17 +03004114 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4115 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4116
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004117 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004118 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004119
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004120 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004121 /* Assume BR/EDR support until proven otherwise (such as
4122 * through reading supported features during init).
4123 */
4124 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4125 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004126
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004127 write_lock(&hci_dev_list_lock);
4128 list_add(&hdev->list, &hci_dev_list);
4129 write_unlock(&hci_dev_list_lock);
4130
Marcel Holtmann4a964402014-07-02 19:10:33 +02004131 /* Devices that are marked for raw-only usage are unconfigured
4132 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004133 */
4134 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004135 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004136
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004138 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004139
Johan Hedberg19202572013-01-14 22:33:51 +02004140 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004141
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004143
David Herrmann33ca9542011-10-08 14:58:49 +02004144err_wqueue:
4145 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004146 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004147err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004148 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004149
David Herrmann33ca9542011-10-08 14:58:49 +02004150 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151}
4152EXPORT_SYMBOL(hci_register_dev);
4153
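/* Illustrative sketch (not part of the original file): the minimal
 * shape of a transport driver registering with the core. The check at
 * the top of hci_register_dev() above rejects a device unless the
 * open, close and send callbacks are all provided. All example_*
 * names are hypothetical stubs.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would hand the skb to its hardware here */
	kfree_skb(skb);
	return 0;
}

static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}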
4154/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004155void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004156{
Sasha Levin3df92b32012-05-27 22:36:56 +02004157 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004158
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004159 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004160
Johan Hovold94324962012-03-15 14:48:41 +01004161 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4162
Sasha Levin3df92b32012-05-27 22:36:56 +02004163 id = hdev->id;
4164
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004165 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004166 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004167 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168
4169 hci_dev_do_close(hdev);
4170
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304171 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004172 kfree_skb(hdev->reassembly[i]);
4173
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004174 cancel_work_sync(&hdev->power_on);
4175
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004176 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004177 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4178 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004179 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004180 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004181 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004182 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004183
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004184 /* mgmt_index_removed should take care of emptying the
4185 * pending list */
4186 BUG_ON(!list_empty(&hdev->mgmt_pending));
4187
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188 hci_notify(hdev, HCI_DEV_UNREG);
4189
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004190 if (hdev->rfkill) {
4191 rfkill_unregister(hdev->rfkill);
4192 rfkill_destroy(hdev->rfkill);
4193 }
4194
Johan Hedberg711eafe2014-08-08 09:32:52 +03004195 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004196
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004197 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004198
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004199 debugfs_remove_recursive(hdev->debugfs);
4200
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004201 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004202 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004203
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004204 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004205 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004206 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004207 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004208 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004209 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004210 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004211 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004212 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004213 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004214 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004215
David Herrmanndc946bd2012-01-07 15:47:24 +01004216 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004217
4218 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219}
4220EXPORT_SYMBOL(hci_unregister_dev);
4221
4222/* Suspend HCI device */
4223int hci_suspend_dev(struct hci_dev *hdev)
4224{
4225 hci_notify(hdev, HCI_DEV_SUSPEND);
4226 return 0;
4227}
4228EXPORT_SYMBOL(hci_suspend_dev);
4229
4230/* Resume HCI device */
4231int hci_resume_dev(struct hci_dev *hdev)
4232{
4233 hci_notify(hdev, HCI_DEV_RESUME);
4234 return 0;
4235}
4236EXPORT_SYMBOL(hci_resume_dev);
4237
Marcel Holtmann75e05692014-11-02 08:15:38 +01004238/* Reset HCI device */
4239int hci_reset_dev(struct hci_dev *hdev)
4240{
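	/* HCI event wire format: event code (HCI_EV_HARDWARE_ERROR), one
	 * byte of parameter total length (0x01), then the single
	 * hardware_code parameter (0x00); hence the 3-byte buffer below.
	 */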
4241 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4242 struct sk_buff *skb;
4243
4244 skb = bt_skb_alloc(3, GFP_ATOMIC);
4245 if (!skb)
4246 return -ENOMEM;
4247
4248 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4249 memcpy(skb_put(skb, 3), hw_err, 3);
4250
4251 /* Send Hardware Error to upper stack */
4252 return hci_recv_frame(hdev, skb);
4253}
4254EXPORT_SYMBOL(hci_reset_dev);
4255
Marcel Holtmann76bca882009-11-18 00:40:39 +01004256/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004257int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004258{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004259 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004260 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004261 kfree_skb(skb);
4262 return -ENXIO;
4263 }
4264
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004265 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004266 bt_cb(skb)->incoming = 1;
4267
4268 /* Time stamp */
4269 __net_timestamp(skb);
4270
Marcel Holtmann76bca882009-11-18 00:40:39 +01004271 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004272 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004273
Marcel Holtmann76bca882009-11-18 00:40:39 +01004274 return 0;
4275}
4276EXPORT_SYMBOL(hci_recv_frame);
4277
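/* Illustrative sketch (not part of the original file): how a driver
 * hands a complete frame received from hardware to the core. The
 * packet type must be set before calling hci_recv_frame(); the
 * example_* name is hypothetical.
 */
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
				 int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}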
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304278static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004279 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304280{
4281 int len = 0;
4282 int hlen = 0;
4283 int remain = count;
4284 struct sk_buff *skb;
4285 struct bt_skb_cb *scb;
4286
4287 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004288 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304289 return -EILSEQ;
4290
4291 skb = hdev->reassembly[index];
4292
4293 if (!skb) {
4294 switch (type) {
4295 case HCI_ACLDATA_PKT:
4296 len = HCI_MAX_FRAME_SIZE;
4297 hlen = HCI_ACL_HDR_SIZE;
4298 break;
4299 case HCI_EVENT_PKT:
4300 len = HCI_MAX_EVENT_SIZE;
4301 hlen = HCI_EVENT_HDR_SIZE;
4302 break;
4303 case HCI_SCODATA_PKT:
4304 len = HCI_MAX_SCO_SIZE;
4305 hlen = HCI_SCO_HDR_SIZE;
4306 break;
4307 }
4308
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004309 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304310 if (!skb)
4311 return -ENOMEM;
4312
4313 scb = (void *) skb->cb;
4314 scb->expect = hlen;
4315 scb->pkt_type = type;
4316
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304317 hdev->reassembly[index] = skb;
4318 }
4319
4320 while (count) {
4321 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004322 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304323
4324 memcpy(skb_put(skb, len), data, len);
4325
4326 count -= len;
4327 data += len;
4328 scb->expect -= len;
4329 remain = count;
4330
4331 switch (type) {
4332 case HCI_EVENT_PKT:
4333 if (skb->len == HCI_EVENT_HDR_SIZE) {
4334 struct hci_event_hdr *h = hci_event_hdr(skb);
4335 scb->expect = h->plen;
4336
4337 if (skb_tailroom(skb) < scb->expect) {
4338 kfree_skb(skb);
4339 hdev->reassembly[index] = NULL;
4340 return -ENOMEM;
4341 }
4342 }
4343 break;
4344
4345 case HCI_ACLDATA_PKT:
4346 if (skb->len == HCI_ACL_HDR_SIZE) {
4347 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4348 scb->expect = __le16_to_cpu(h->dlen);
4349
4350 if (skb_tailroom(skb) < scb->expect) {
4351 kfree_skb(skb);
4352 hdev->reassembly[index] = NULL;
4353 return -ENOMEM;
4354 }
4355 }
4356 break;
4357
4358 case HCI_SCODATA_PKT:
4359 if (skb->len == HCI_SCO_HDR_SIZE) {
4360 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4361 scb->expect = h->dlen;
4362
4363 if (skb_tailroom(skb) < scb->expect) {
4364 kfree_skb(skb);
4365 hdev->reassembly[index] = NULL;
4366 return -ENOMEM;
4367 }
4368 }
4369 break;
4370 }
4371
4372 if (scb->expect == 0) {
4373 /* Complete frame */
4374
4375 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004376 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304377
4378 hdev->reassembly[index] = NULL;
4379 return remain;
4380 }
4381 }
4382
4383 return remain;
4384}
4385
Suraj Sumangala99811512010-07-14 13:02:19 +05304386#define STREAM_REASSEMBLY 0
4387
4388int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4389{
4390 int type;
4391 int rem = 0;
4392
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004393 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304394 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4395
4396 if (!skb) {
4397 struct { char type; } *pkt;
4398
4399 /* Start of the frame */
4400 pkt = data;
4401 type = pkt->type;
4402
4403 data++;
4404 count--;
4405 } else
4406 type = bt_cb(skb)->pkt_type;
4407
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004408 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004409 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304410 if (rem < 0)
4411 return rem;
4412
4413 data += (count - rem);
4414 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004415 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304416
4417 return rem;
4418}
4419EXPORT_SYMBOL(hci_recv_stream_fragment);
4420
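/* Illustrative sketch (not part of the original file): a UART-style
 * driver feeding raw bytes as they arrive. Each frame in the stream is
 * prefixed by its H4 packet type byte; the core buffers partial frames
 * in hdev->reassembly and delivers complete ones via hci_recv_frame().
 * The example_* name is hypothetical.
 */
static void example_uart_rx(struct hci_dev *hdev, const u8 *data, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *)data, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}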
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421/* ---- Interface to upper protocols ---- */
4422
Linus Torvalds1da177e2005-04-16 15:20:36 -07004423int hci_register_cb(struct hci_cb *cb)
4424{
4425 BT_DBG("%p name %s", cb, cb->name);
4426
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004427 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004429 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004430
4431 return 0;
4432}
4433EXPORT_SYMBOL(hci_register_cb);
4434
4435int hci_unregister_cb(struct hci_cb *cb)
4436{
4437 BT_DBG("%p name %s", cb, cb->name);
4438
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004439 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004441 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004442
4443 return 0;
4444}
4445EXPORT_SYMBOL(hci_unregister_cb);
4446
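/* Illustrative sketch (not part of the original file): an upper-layer
 * protocol hooking into connection events. The security_cfm field and
 * its signature are assumptions based on the in-tree L2CAP user of
 * struct hci_cb; the example_* names are hypothetical.
 */
static void example_security_cfm(struct hci_conn *conn, __u8 status,
				 __u8 encrypt)
{
}

static struct hci_cb example_cb = {
	.name		= "example",
	.security_cfm	= example_security_cfm,
};

/* Paired calls: hci_register_cb(&example_cb) on module init and
 * hci_unregister_cb(&example_cb) on exit.
 */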
Marcel Holtmann51086992013-10-10 14:54:19 -07004447static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004448{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004449 int err;
4450
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004451 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004453 /* Time stamp */
4454 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004456 /* Send copy to monitor */
4457 hci_send_to_monitor(hdev, skb);
4458
4459 if (atomic_read(&hdev->promisc)) {
4460 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004461 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 }
4463
4464 /* Get rid of the skb owner prior to sending to the driver. */
4465 skb_orphan(skb);
4466
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004467 err = hdev->send(hdev, skb);
4468 if (err < 0) {
4469 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4470 kfree_skb(skb);
4471 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472}
4473
Johan Hedberg3119ae92013-03-05 20:37:44 +02004474void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4475{
4476 skb_queue_head_init(&req->cmd_q);
4477 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004478 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004479}
4480
4481int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4482{
4483 struct hci_dev *hdev = req->hdev;
4484 struct sk_buff *skb;
4485 unsigned long flags;
4486
4487 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4488
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004489 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004490 * commands queued on the HCI request queue.
4491 */
4492 if (req->err) {
4493 skb_queue_purge(&req->cmd_q);
4494 return req->err;
4495 }
4496
Johan Hedberg3119ae92013-03-05 20:37:44 +02004497 /* Do not allow empty requests */
4498 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004499 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004500
4501 skb = skb_peek_tail(&req->cmd_q);
4502 bt_cb(skb)->req.complete = complete;
4503
4504 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4505 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4506 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4507
4508 queue_work(hdev->workqueue, &hdev->cmd_work);
4509
4510 return 0;
4511}
4512
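/* Illustrative sketch (not part of the original file): building and
 * running an asynchronous request. The opcode, parameter struct and
 * LE_SCAN_DISABLE constant are the real ones used by
 * hci_req_add_le_scan_disable() at the end of this file; the
 * example_* names are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	/* Runs asynchronously; example_req_complete is called once the
	 * last command in the request completes.
	 */
	return hci_req_run(&req, example_req_complete);
}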
Marcel Holtmann899de762014-07-11 05:51:58 +02004513bool hci_req_pending(struct hci_dev *hdev)
4514{
4515 return (hdev->req_status == HCI_REQ_PEND);
4516}
4517
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004518static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004519 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520{
4521 int len = HCI_COMMAND_HDR_SIZE + plen;
4522 struct hci_command_hdr *hdr;
4523 struct sk_buff *skb;
4524
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004526 if (!skb)
4527 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528
4529 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004530 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 hdr->plen = plen;
4532
4533 if (plen)
4534 memcpy(skb_put(skb, plen), param, plen);
4535
4536 BT_DBG("skb len %d", skb->len);
4537
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004538 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004539 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004540
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004541 return skb;
4542}
4543
4544/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004545int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4546 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004547{
4548 struct sk_buff *skb;
4549
4550 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4551
4552 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4553 if (!skb) {
4554 BT_ERR("%s no memory for command", hdev->name);
4555 return -ENOMEM;
4556 }
4557
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004558 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004559 * single-command requests.
4560 */
4561 bt_cb(skb)->req.start = true;
4562
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004564 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004565
4566 return 0;
4567}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568
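/* Illustrative usage (not part of the original file): queueing a
 * stand-alone command with no parameters. hci_send_cmd() above flags
 * it as a single-command request before handing it to cmd_work.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}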
Johan Hedberg71c76a12013-03-05 20:37:46 +02004569/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004570void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4571 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004572{
4573 struct hci_dev *hdev = req->hdev;
4574 struct sk_buff *skb;
4575
4576 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4577
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004578 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004579 * queueing the HCI command. We can simply return.
4580 */
4581 if (req->err)
4582 return;
4583
Johan Hedberg71c76a12013-03-05 20:37:46 +02004584 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4585 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004586 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4587 hdev->name, opcode);
4588 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004589 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004590 }
4591
4592 if (skb_queue_empty(&req->cmd_q))
4593 bt_cb(skb)->req.start = true;
4594
Johan Hedberg02350a72013-04-03 21:50:29 +03004595 bt_cb(skb)->req.event = event;
4596
Johan Hedberg71c76a12013-03-05 20:37:46 +02004597 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004598}
4599
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004600void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4601 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004602{
4603 hci_req_add_ev(req, opcode, plen, param, 0);
4604}
4605
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004607void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004608{
4609 struct hci_command_hdr *hdr;
4610
4611 if (!hdev->sent_cmd)
4612 return NULL;
4613
4614 hdr = (void *) hdev->sent_cmd->data;
4615
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004616 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004617 return NULL;
4618
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004619 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620
4621 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4622}
4623
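/* Illustrative sketch (not part of the original file): a command
 * complete handler recovering the parameters it originally sent, the
 * pattern used throughout hci_event.c. A NULL return means the last
 * sent command had a different opcode. The example_* name is
 * hypothetical.
 */
static void example_cc_le_set_scan_enable(struct hci_dev *hdev)
{
	struct hci_cp_le_set_scan_enable *cp;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	BT_DBG("%s enable %u", hdev->name, cp->enable);
}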
4624/* Send ACL data */
4625static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4626{
4627 struct hci_acl_hdr *hdr;
4628 int len = skb->len;
4629
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004630 skb_push(skb, HCI_ACL_HDR_SIZE);
4631 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004632 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004633 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4634 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004635}
4636
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004637static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004638 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004640 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004641 struct hci_dev *hdev = conn->hdev;
4642 struct sk_buff *list;
4643
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004644 skb->len = skb_headlen(skb);
4645 skb->data_len = 0;
4646
4647 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004648
4649 switch (hdev->dev_type) {
4650 case HCI_BREDR:
4651 hci_add_acl_hdr(skb, conn->handle, flags);
4652 break;
4653 case HCI_AMP:
4654 hci_add_acl_hdr(skb, chan->handle, flags);
4655 break;
4656 default:
4657 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4658 return;
4659 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004660
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004661 list = skb_shinfo(skb)->frag_list;
4662 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 /* Non-fragmented */
4664 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4665
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004666 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004667 } else {
4668 /* Fragmented */
4669 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4670
4671 skb_shinfo(skb)->frag_list = NULL;
4672
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004673 /* Queue all fragments atomically. We need to use spin_lock_bh
4674 * here because of 6LoWPAN links, as there this function is
4675 * called from softirq and using normal spin lock could cause
4676 * deadlocks.
4677 */
4678 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004679
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004680 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004681
4682 flags &= ~ACL_START;
4683 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004684 do {
4685 skb = list;
list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004686
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004687 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004688 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689
4690 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4691
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004692 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693 } while (list);
4694
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004695 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004697}
4698
4699void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4700{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004701 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004702
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004703 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004704
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004705 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004707 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709
4710/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004711void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712{
4713 struct hci_dev *hdev = conn->hdev;
4714 struct hci_sco_hdr hdr;
4715
4716 BT_DBG("%s len %d", hdev->name, skb->len);
4717
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004718 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719 hdr.dlen = skb->len;
4720
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004721 skb_push(skb, HCI_SCO_HDR_SIZE);
4722 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004723 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004725 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004726
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004728 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004730
4731/* ---- HCI TX task (outgoing data) ---- */
4732
4733/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004734static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4735 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004736{
4737 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004738 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004739 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004740
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004741 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004743
4744 rcu_read_lock();
4745
4746 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004747 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004748 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004749
4750 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4751 continue;
4752
Linus Torvalds1da177e2005-04-16 15:20:36 -07004753 num++;
4754
4755 if (c->sent < min) {
4756 min = c->sent;
4757 conn = c;
4758 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004759
4760 if (hci_conn_num(hdev, type) == num)
4761 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004762 }
4763
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004764 rcu_read_unlock();
4765
Linus Torvalds1da177e2005-04-16 15:20:36 -07004766 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004767 int cnt, q;
4768
4769 switch (conn->type) {
4770 case ACL_LINK:
4771 cnt = hdev->acl_cnt;
4772 break;
4773 case SCO_LINK:
4774 case ESCO_LINK:
4775 cnt = hdev->sco_cnt;
4776 break;
4777 case LE_LINK:
4778 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4779 break;
4780 default:
4781 cnt = 0;
4782 BT_ERR("Unknown link type");
4783 }
4784
4785 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004786 *quote = q ? q : 1;
4787 } else
4788 *quote = 0;
4789
4790 BT_DBG("conn %p quote %d", conn, *quote);
4791 return conn;
4792}
4793
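/* Worked example (not part of the original file): with hdev->acl_cnt = 8
 * and three ACL connections holding queued data, num = 3, the
 * connection with the lowest c->sent is picked and its quote becomes
 * 8 / 3 = 2, so the least busy link drains first and fairness emerges
 * over repeated scheduling rounds.
 */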
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004794static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795{
4796 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004797 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798
Ville Tervobae1f5d92011-02-10 22:38:53 -03004799 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004800
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004801 rcu_read_lock();
4802
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004804 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004805 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004806 BT_ERR("%s killing stalled connection %pMR",
4807 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004808 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004809 }
4810 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004811
4812 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004813}
4814
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004815static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4816 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004817{
4818 struct hci_conn_hash *h = &hdev->conn_hash;
4819 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004820 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004821 struct hci_conn *conn;
4822 int cnt, q, conn_num = 0;
4823
4824 BT_DBG("%s", hdev->name);
4825
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004826 rcu_read_lock();
4827
4828 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004829 struct hci_chan *tmp;
4830
4831 if (conn->type != type)
4832 continue;
4833
4834 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4835 continue;
4836
4837 conn_num++;
4838
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004839 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004840 struct sk_buff *skb;
4841
4842 if (skb_queue_empty(&tmp->data_q))
4843 continue;
4844
4845 skb = skb_peek(&tmp->data_q);
4846 if (skb->priority < cur_prio)
4847 continue;
4848
4849 if (skb->priority > cur_prio) {
4850 num = 0;
4851 min = ~0;
4852 cur_prio = skb->priority;
4853 }
4854
4855 num++;
4856
4857 if (conn->sent < min) {
4858 min = conn->sent;
4859 chan = tmp;
4860 }
4861 }
4862
4863 if (hci_conn_num(hdev, type) == conn_num)
4864 break;
4865 }
4866
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004867 rcu_read_unlock();
4868
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004869 if (!chan)
4870 return NULL;
4871
4872 switch (chan->conn->type) {
4873 case ACL_LINK:
4874 cnt = hdev->acl_cnt;
4875 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004876 case AMP_LINK:
4877 cnt = hdev->block_cnt;
4878 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004879 case SCO_LINK:
4880 case ESCO_LINK:
4881 cnt = hdev->sco_cnt;
4882 break;
4883 case LE_LINK:
4884 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4885 break;
4886 default:
4887 cnt = 0;
4888 BT_ERR("Unknown link type");
4889 }
4890
4891 q = cnt / num;
4892 *quote = q ? q : 1;
4893 BT_DBG("chan %p quote %d", chan, *quote);
4894 return chan;
4895}
4896
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004897static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4898{
4899 struct hci_conn_hash *h = &hdev->conn_hash;
4900 struct hci_conn *conn;
4901 int num = 0;
4902
4903 BT_DBG("%s", hdev->name);
4904
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004905 rcu_read_lock();
4906
4907 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004908 struct hci_chan *chan;
4909
4910 if (conn->type != type)
4911 continue;
4912
4913 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4914 continue;
4915
4916 num++;
4917
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004918 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004919 struct sk_buff *skb;
4920
4921 if (chan->sent) {
4922 chan->sent = 0;
4923 continue;
4924 }
4925
4926 if (skb_queue_empty(&chan->data_q))
4927 continue;
4928
4929 skb = skb_peek(&chan->data_q);
4930 if (skb->priority >= HCI_PRIO_MAX - 1)
4931 continue;
4932
4933 skb->priority = HCI_PRIO_MAX - 1;
4934
4935 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004936 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004937 }
4938
4939 if (hci_conn_num(hdev, type) == num)
4940 break;
4941 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004942
4943 rcu_read_unlock();
4944
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004945}
4946
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004947static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4948{
4949 /* Calculate count of blocks used by this packet */
4950 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4951}
4952
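/* Worked example (not part of the original file): a 260-byte ACL frame
 * with hdev->block_len = 64 consumes DIV_ROUND_UP(260 - 4, 64) =
 * DIV_ROUND_UP(256, 64) = 4 controller buffer blocks, since the 4-byte
 * ACL header (HCI_ACL_HDR_SIZE) is not counted against the data blocks.
 */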
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004953static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004955 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956 /* ACL tx timeout must be longer than the maximum
4957 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004958 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004959 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004960 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004961 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004962}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004964static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004965{
4966 unsigned int cnt = hdev->acl_cnt;
4967 struct hci_chan *chan;
4968 struct sk_buff *skb;
4969 int quote;
4970
4971 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004972
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004973 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004974 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004975 u32 priority = (skb_peek(&chan->data_q))->priority;
4976 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004977 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004978 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004979
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004980 /* Stop if priority has changed */
4981 if (skb->priority < priority)
4982 break;
4983
4984 skb = skb_dequeue(&chan->data_q);
4985
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004986 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004987 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004988
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004989 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004990 hdev->acl_last_tx = jiffies;
4991
4992 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004993 chan->sent++;
4994 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004995 }
4996 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004997
4998 if (cnt != hdev->acl_cnt)
4999 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000}
5001
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005002static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005003{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005004 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005005 struct hci_chan *chan;
5006 struct sk_buff *skb;
5007 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005008 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005009
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005010 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005011
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005012 BT_DBG("%s", hdev->name);
5013
5014 if (hdev->dev_type == HCI_AMP)
5015 type = AMP_LINK;
5016 else
5017 type = ACL_LINK;
5018
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005019 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005020 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005021 u32 priority = (skb_peek(&chan->data_q))->priority;
5022 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5023 int blocks;
5024
5025 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005026 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005027
5028 /* Stop if priority has changed */
5029 if (skb->priority < priority)
5030 break;
5031
5032 skb = skb_dequeue(&chan->data_q);
5033
5034 blocks = __get_blocks(hdev, skb);
5035 if (blocks > hdev->block_cnt)
5036 return;
5037
5038 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005039 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005040
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005041 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005042 hdev->acl_last_tx = jiffies;
5043
5044 hdev->block_cnt -= blocks;
5045 quote -= blocks;
5046
5047 chan->sent += blocks;
5048 chan->conn->sent += blocks;
5049 }
5050 }
5051
5052 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005053 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005054}
5055
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005056static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005057{
5058 BT_DBG("%s", hdev->name);
5059
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005060 /* No ACL link over BR/EDR controller */
5061 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5062 return;
5063
5064 /* No AMP link over AMP controller */
5065 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005066 return;
5067
5068 switch (hdev->flow_ctl_mode) {
5069 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5070 hci_sched_acl_pkt(hdev);
5071 break;
5072
5073 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5074 hci_sched_acl_blk(hdev);
5075 break;
5076 }
5077}
5078
Linus Torvalds1da177e2005-04-16 15:20:36 -07005079/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005080static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005081{
5082 struct hci_conn *conn;
5083 struct sk_buff *skb;
5084 int quote;
5085
5086 BT_DBG("%s", hdev->name);
5087
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005088 if (!hci_conn_num(hdev, SCO_LINK))
5089 return;
5090
Linus Torvalds1da177e2005-04-16 15:20:36 -07005091 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5092 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5093 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005094 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005095
5096 conn->sent++;
5097 if (conn->sent == ~0)
5098 conn->sent = 0;
5099 }
5100 }
5101}
5102
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005103static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005104{
5105 struct hci_conn *conn;
5106 struct sk_buff *skb;
5107 int quote;
5108
5109 BT_DBG("%s", hdev->name);
5110
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005111 if (!hci_conn_num(hdev, ESCO_LINK))
5112 return;
5113
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005114 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5115 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005116 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5117 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005118 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005119
5120 conn->sent++;
5121 if (conn->sent == ~0)
5122 conn->sent = 0;
5123 }
5124 }
5125}
5126
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005127static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005128{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005129 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005130 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005131 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005132
5133 BT_DBG("%s", hdev->name);
5134
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005135 if (!hci_conn_num(hdev, LE_LINK))
5136 return;
5137
Marcel Holtmann4a964402014-07-02 19:10:33 +02005138 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005139 /* LE tx timeout must be longer than the maximum
5140 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005141 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005142 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005143 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005144 }
5145
5146 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005147 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005148 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005149 u32 priority = (skb_peek(&chan->data_q))->priority;
5150 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005151 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005152 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005153
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005154 /* Stop if priority has changed */
5155 if (skb->priority < priority)
5156 break;
5157
5158 skb = skb_dequeue(&chan->data_q);
5159
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005160 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005161 hdev->le_last_tx = jiffies;
5162
5163 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005164 chan->sent++;
5165 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005166 }
5167 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005168
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005169 if (hdev->le_pkts)
5170 hdev->le_cnt = cnt;
5171 else
5172 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005173
5174 if (cnt != tmp)
5175 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005176}
5177
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005178static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005180 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181 struct sk_buff *skb;
5182
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005183 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005184 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185
Marcel Holtmann52de5992013-09-03 18:08:38 -07005186 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5187 /* Schedule queues and send stuff to HCI driver */
5188 hci_sched_acl(hdev);
5189 hci_sched_sco(hdev);
5190 hci_sched_esco(hdev);
5191 hci_sched_le(hdev);
5192 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005193
Linus Torvalds1da177e2005-04-16 15:20:36 -07005194 /* Send next queued raw (unknown type) packet */
5195 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005196 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197}
5198
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005199/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200
5201/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005202static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203{
5204 struct hci_acl_hdr *hdr = (void *) skb->data;
5205 struct hci_conn *conn;
5206 __u16 handle, flags;
5207
5208 skb_pull(skb, HCI_ACL_HDR_SIZE);
5209
5210 handle = __le16_to_cpu(hdr->handle);
5211 flags = hci_flags(handle);
5212 handle = hci_handle(handle);
5213
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005214 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005215 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005216
5217 hdev->stat.acl_rx++;
5218
5219 hci_dev_lock(hdev);
5220 conn = hci_conn_hash_lookup_handle(hdev, handle);
5221 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005222
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005224 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005225
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005227 l2cap_recv_acldata(conn, skb, flags);
5228 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005230 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005231 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 }
5233
5234 kfree_skb(skb);
5235}
5236
5237/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005238static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005239{
5240 struct hci_sco_hdr *hdr = (void *) skb->data;
5241 struct hci_conn *conn;
5242 __u16 handle;
5243
5244 skb_pull(skb, HCI_SCO_HDR_SIZE);
5245
5246 handle = __le16_to_cpu(hdr->handle);
5247
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005248 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249
5250 hdev->stat.sco_rx++;
5251
5252 hci_dev_lock(hdev);
5253 conn = hci_conn_hash_lookup_handle(hdev, handle);
5254 hci_dev_unlock(hdev);
5255
5256 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005257 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005258 sco_recv_scodata(conn, skb);
5259 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005261 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005262 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 }
5264
5265 kfree_skb(skb);
5266}
5267
Johan Hedberg9238f362013-03-05 20:37:48 +02005268static bool hci_req_is_complete(struct hci_dev *hdev)
5269{
5270 struct sk_buff *skb;
5271
5272 skb = skb_peek(&hdev->cmd_q);
5273 if (!skb)
5274 return true;
5275
5276 return bt_cb(skb)->req.start;
5277}
5278
Johan Hedberg42c6b122013-03-05 20:37:49 +02005279static void hci_resend_last(struct hci_dev *hdev)
5280{
5281 struct hci_command_hdr *sent;
5282 struct sk_buff *skb;
5283 u16 opcode;
5284
5285 if (!hdev->sent_cmd)
5286 return;
5287
5288 sent = (void *) hdev->sent_cmd->data;
5289 opcode = __le16_to_cpu(sent->opcode);
5290 if (opcode == HCI_OP_RESET)
5291 return;
5292
5293 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5294 if (!skb)
5295 return;
5296
5297 skb_queue_head(&hdev->cmd_q, skb);
5298 queue_work(hdev->workqueue, &hdev->cmd_work);
5299}
5300
Johan Hedberg9238f362013-03-05 20:37:48 +02005301void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5302{
5303 hci_req_complete_t req_complete = NULL;
5304 struct sk_buff *skb;
5305 unsigned long flags;
5306
5307 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5308
Johan Hedberg42c6b122013-03-05 20:37:49 +02005309 /* If the completed command doesn't match the last one that was
5310 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005311 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005312 if (!hci_sent_cmd_data(hdev, opcode)) {
5313 /* Some CSR based controllers generate a spontaneous
5314 * reset complete event during init and any pending
5315 * command will never be completed. In such a case we
5316 * need to resend whatever was the last sent
5317 * command.
5318 */
5319 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5320 hci_resend_last(hdev);
5321
Johan Hedberg9238f362013-03-05 20:37:48 +02005322 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005323 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005324
5325 /* If the command succeeded and there's still more commands in
5326 * this request the request is not yet complete.
5327 */
5328 if (!status && !hci_req_is_complete(hdev))
5329 return;
5330
5331 /* If this was the last command in a request, the complete
5332 * callback would be found in hdev->sent_cmd instead of the
5333 * command queue (hdev->cmd_q).
5334 */
5335 if (hdev->sent_cmd) {
5336 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005337
5338 if (req_complete) {
5339 /* We must set the complete callback to NULL to
5340 * avoid calling the callback more than once if
5341 * this function gets called again.
5342 */
5343 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5344
Johan Hedberg9238f362013-03-05 20:37:48 +02005345 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005346 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005347 }
5348
5349 /* Remove all pending commands belonging to this request */
5350 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5351 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5352 if (bt_cb(skb)->req.start) {
5353 __skb_queue_head(&hdev->cmd_q, skb);
5354 break;
5355 }
5356
5357 req_complete = bt_cb(skb)->req.complete;
5358 kfree_skb(skb);
5359 }
5360 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5361
5362call_complete:
5363 if (req_complete)
5364 req_complete(hdev, status);
5365}
5366
static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(hdev, skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                cancel_delayed_work(&hdev->cmd_timer);
                        else
                                schedule_delayed_work(&hdev->cmd_timer,
                                                      HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}

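/* A minimal illustrative sketch of how a single command enters
 * hdev->cmd_q and thus reaches hci_cmd_work(). It only uses the
 * existing hci_send_cmd() helper; the function below is hypothetical
 * and the opcode is merely an example.
 */
static void __maybe_unused example_queue_single_cmd(struct hci_dev *hdev)
{
        /* Queue a parameterless Read BD_ADDR command; hci_cmd_work()
         * sends it once the controller has command credit
         * (hdev->cmd_cnt > 0).
         */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
}
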
void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

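/* A minimal illustrative sketch of the request pattern that
 * hci_req_add_le_scan_disable() participates in: initialize a request,
 * queue the command, then run it with a completion callback. The two
 * functions below are hypothetical; hci_req_init(), hci_req_run() and
 * the hci_req_complete_t callback shape are the real primitives, used
 * the same way by hci_update_background_scan() further down.
 */
static void __maybe_unused example_scan_disable_complete(struct hci_dev *hdev,
                                                         u8 status)
{
        BT_DBG("%s LE scan disable done, status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_le_scan_disable(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        hci_req_add_le_scan_disable(&req);

        /* Returns 0 on success; the callback runs once the controller
         * has completed the queued command.
         */
        return hci_req_run(&req, example_scan_disable_complete);
}
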
static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using an unresolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
        if (status)
                BT_DBG("HCI request failed to update background scanning: status 0x%2.2x",
                       status);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
        struct hci_request req;
        struct hci_conn *conn;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            test_bit(HCI_SETUP, &hdev->dev_flags) ||
            test_bit(HCI_CONFIG, &hdev->dev_flags) ||
            test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
            test_bit(HCI_UNREGISTER, &hdev->dev_flags))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
                return;

        /* If discovery is active, don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        hci_req_init(&req, hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If the controller is not scanning, we are done. */
                if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        return;

                hci_req_add_le_scan_disable(&req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (conn)
                        return;

                /* If the controller is currently scanning, we stop it to
                 * ensure we don't miss any advertising (due to the
                 * duplicates filter).
                 */
                if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                        hci_req_add_le_scan_disable(&req);

                hci_req_add_le_passive_scan(&req);

                BT_DBG("%s starting background scanning", hdev->name);
        }

        err = hci_req_run(&req, update_background_scan_complete);
        if (err)
                BT_ERR("Failed to run HCI request: err %d", err);
}

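/* A minimal illustrative sketch of the calling convention for
 * hci_update_background_scan(): it must run with hdev->lock held, so
 * callers bracket it with hci_dev_lock()/hci_dev_unlock(). The wrapper
 * name below is hypothetical.
 */
static void __maybe_unused example_refresh_background_scan(struct hci_dev *hdev)
{
        hci_dev_lock(hdev);
        hci_update_background_scan(hdev);
        hci_dev_unlock(hdev);
}
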
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
        u8 scan;

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
                scan |= SCAN_INQUIRY;

        if (req)
                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        else
                hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
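
/* A minimal illustrative sketch of the two ways hci_update_page_scan()
 * is used: batched into a caller-owned request, or stand-alone with
 * req == NULL so that the Write Scan Enable command goes out
 * immediately via hci_send_cmd(). The wrapper name is hypothetical.
 */
static void __maybe_unused example_update_page_scan_now(struct hci_dev *hdev)
{
        /* No request to batch into: send the command right away */
        hci_update_page_scan(hdev, NULL);
}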