/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

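/* Boolean debugfs attributes below follow a common pattern: reads return
 * a single 'Y' or 'N' character, and writes are parsed with strtobool()
 * before flipping a flag bit. dut_mode additionally sends a synchronous
 * HCI command to enter (HCI_OP_ENABLE_DUT_MODE) or leave (HCI_OP_RESET)
 * Device Under Test mode.
 */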
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

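/* Read-only debugfs entries use the seq_file single_open() pattern: a
 * *_show() callback prints the state, typically under hci_dev_lock(),
 * and the matching file_operations wire it up through seq_read() and
 * seq_lseek().
 */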
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

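/* The numeric debugfs attributes from here on use DEFINE_SIMPLE_ATTRIBUTE():
 * a get() callback, an optional set() callback and a printf format string.
 * The set() callbacks validate the value range before updating hdev.
 */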
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

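/* The SMP key lists (identity resolving keys and long term keys) are
 * protected by RCU rather than hci_dev_lock(), so the dumps below walk
 * them under rcu_read_lock().
 */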
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

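/* device_list dumps both the BR/EDR whitelist and the stored LE
 * connection parameters, including each entry's auto_connect policy.
 */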
static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

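/* Fetch the last received event skb and check that it carries the reply
 * being waited for: either a specific event code, or a Command Complete
 * whose opcode matches the issued command. Returns the skb on a match,
 * ERR_PTR(-ENODATA) otherwise.
 */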
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

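/* Send a single HCI command and sleep until the matching reply arrives
 * or the timeout expires. On success the reply skb is returned, on
 * failure an ERR_PTR(). Callers run in process context and serialize
 * through hci_req_lock(), as dut_mode_write() above does, e.g.:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 */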
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
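/* The caller's request builder callback queues one or more commands on
 * the request; hci_req_run() sends them and hci_req_sync_complete()
 * moves req_status from HCI_REQ_PEND to HCI_REQ_DONE (or _CANCELED) and
 * wakes req_wait_q. A builder that queues no commands at all makes
 * hci_req_run() return -ENODATA, which is treated as success below.
 */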
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

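/* Pick the inquiry mode: 0x02 for extended inquiry, 0x01 for inquiry
 * with RSSI, 0x00 for standard. The manufacturer/revision checks below
 * force mode 0x01 for controllers that support RSSI reporting without
 * advertising the corresponding feature bit.
 */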
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

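/* Build the HCI_OP_SET_EVENT_MASK payload from the controller's feature
 * bits: BR/EDR capable controllers start from the default mask (with a
 * Broadcom workaround noted below), LE-only controllers from an empty
 * mask, and individual event bits are then enabled only when the
 * corresponding LMP feature is supported.
 */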
Johan Hedberg42c6b122013-03-05 20:37:49 +02001434static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001435{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001436 struct hci_dev *hdev = req->hdev;
1437
Johan Hedberg2177bab2013-03-05 20:37:43 +02001438 /* The second byte is 0xff instead of 0x9f (two reserved bits
1439 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1440 * command otherwise.
1441 */
1442 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1443
1444 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1445 * any event mask for pre 1.2 devices.
1446 */
1447 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1448 return;
1449
1450 if (lmp_bredr_capable(hdev)) {
1451 events[4] |= 0x01; /* Flow Specification Complete */
1452 events[4] |= 0x02; /* Inquiry Result with RSSI */
1453 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1454 events[5] |= 0x08; /* Synchronous Connection Complete */
1455 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001456 } else {
1457 /* Use a different default for LE-only devices */
1458 memset(events, 0, sizeof(events));
1459 events[0] |= 0x10; /* Disconnection Complete */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001460 events[1] |= 0x08; /* Read Remote Version Information Complete */
1461 events[1] |= 0x20; /* Command Complete */
1462 events[1] |= 0x40; /* Command Status */
1463 events[1] |= 0x80; /* Hardware Error */
1464 events[2] |= 0x04; /* Number of Completed Packets */
1465 events[3] |= 0x02; /* Data Buffer Overflow */
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001466
1467 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
1468 events[0] |= 0x80; /* Encryption Change */
1469 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1470 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001471 }
1472
1473 if (lmp_inq_rssi_capable(hdev))
1474 events[4] |= 0x02; /* Inquiry Result with RSSI */
1475
1476 if (lmp_sniffsubr_capable(hdev))
1477 events[5] |= 0x20; /* Sniff Subrating */
1478
1479 if (lmp_pause_enc_capable(hdev))
1480 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1481
1482 if (lmp_ext_inq_capable(hdev))
1483 events[5] |= 0x40; /* Extended Inquiry Result */
1484
1485 if (lmp_no_flush_capable(hdev))
1486 events[7] |= 0x01; /* Enhanced Flush Complete */
1487
1488 if (lmp_lsto_capable(hdev))
1489 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1490
1491 if (lmp_ssp_capable(hdev)) {
1492 events[6] |= 0x01; /* IO Capability Request */
1493 events[6] |= 0x02; /* IO Capability Response */
1494 events[6] |= 0x04; /* User Confirmation Request */
1495 events[6] |= 0x08; /* User Passkey Request */
1496 events[6] |= 0x10; /* Remote OOB Data Request */
1497 events[6] |= 0x20; /* Simple Pairing Complete */
1498 events[7] |= 0x04; /* User Passkey Notification */
1499 events[7] |= 0x08; /* Keypress Notification */
1500 events[7] |= 0x10; /* Remote Host Supported
1501 * Features Notification
1502 */
1503 }
1504
1505 if (lmp_le_capable(hdev))
1506 events[7] |= 0x20; /* LE Meta-Event */
1507
Johan Hedberg42c6b122013-03-05 20:37:49 +02001508 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001509}
1510
Johan Hedberg42c6b122013-03-05 20:37:49 +02001511static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001512{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001513 struct hci_dev *hdev = req->hdev;
1514
Johan Hedberg2177bab2013-03-05 20:37:43 +02001515 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001516 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001517 else
1518 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001519
1520 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001521 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001522
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1525 */
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001528
1529 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1535 */
1536 hdev->max_page = 0x01;
1537
Johan Hedberg2177bab2013-03-05 20:37:43 +02001538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001542 } else {
1543 struct hci_cp_write_eir cp;
1544
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1547
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549 }
1550 }
1551
1552 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001553 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001554
1555 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001557
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
1560
1561 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001564 }
1565
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570 }
1571}
1572
Johan Hedberg42c6b122013-03-05 20:37:49 +02001573static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001574{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001575 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001590}
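/* Worked example (editor's illustration, constants from the Bluetooth
 * core specification): a controller supporting role switch and sniff
 * mode but neither hold nor park ends up with link_policy =
 * HCI_LP_RSWITCH | HCI_LP_SNIFF = 0x0001 | 0x0004 = 0x0005, sent
 * little-endian by the cpu_to_le16() conversion above.
 */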
1591
Johan Hedberg42c6b122013-03-05 20:37:49 +02001592static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001593{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001594 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001595 struct hci_cp_write_le_host_supported cp;
1596
Johan Hedbergc73eee92013-04-19 18:35:21 +03001597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
Johan Hedberg2177bab2013-03-05 20:37:43 +02001601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
Marcel Holtmann32226e42014-07-24 20:04:16 +02001605 cp.simul = 0x00;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001611}
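/* Worked example (editor's note): on a dual-mode controller with
 * HCI_LE_ENABLED set, the code above sends Write LE Host Supported
 * with le = 0x01 and simul = 0x00. On an LE-only controller the
 * command is never sent, since host-side LE support cannot be
 * toggled there.
 */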
1612
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618	/* If Connectionless Slave Broadcast master role is supported,
1619 * enable all necessary events for it.
1620 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001621 if (lmp_csb_master_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628	/* If Connectionless Slave Broadcast slave role is supported,
1629 * enable all necessary events for it.
1630 */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001631 if (lmp_csb_slave_capable(hdev)) {
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001638 /* Enable Authenticated Payload Timeout Expired event if supported */
Marcel Holtmanncd7ca0e2014-07-09 09:49:05 +02001639 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
Marcel Holtmann40c59fc2014-01-10 02:07:21 -08001640 events[2] |= 0x80;
1641
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
Johan Hedberg42c6b122013-03-05 20:37:49 +02001645static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001646{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001647 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001648 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001649
Marcel Holtmann0da71f12014-07-12 23:36:16 +02001650 hci_setup_event_mask(req);
1651
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001652	/* Some Broadcom based Bluetooth controllers do not support the
1653	 * Delete Stored Link Key command. They clearly indicate its
1654	 * absence in the bit mask of supported commands.
1655	 *
1656	 * Check the supported commands and send the command only if it
1657	 * is marked as supported. If it is not supported, assume that
1658	 * the controller has no actual support for stored link keys,
1659	 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001660	 *
1661	 * Some controllers indicate that they support deleting stored
1662	 * link keys, but they don't. The quirk lets a driver just
1663	 * disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001664	 */
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001665 if (hdev->commands[6] & 0x80 &&
1666 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001667 struct hci_cp_delete_stored_link_key cp;
1668
1669 bacpy(&cp.bdaddr, BDADDR_ANY);
1670 cp.delete_all = 0x01;
1671 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1672 sizeof(cp), &cp);
1673 }
1674
Johan Hedberg2177bab2013-03-05 20:37:43 +02001675 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001676 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001677
Andre Guedes9193c6e2014-07-01 18:10:09 -03001678 if (lmp_le_capable(hdev)) {
1679 u8 events[8];
1680
1681 memset(events, 0, sizeof(events));
Marcel Holtmann4d6c7052014-07-13 00:29:22 +02001682 events[0] = 0x0f;
1683
1684 if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
1685 events[0] |= 0x10; /* LE Long Term Key Request */
Andre Guedes662bc2e2014-07-01 18:10:10 -03001686
1687	/* If the controller supports the Connection Parameters Request
1688 * Link Layer Procedure, enable the corresponding event.
1689 */
1690 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1691 events[0] |= 0x20; /* LE Remote Connection
1692 * Parameter Request
1693 */
1694
Andre Guedes9193c6e2014-07-01 18:10:09 -03001695 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1696 events);
1697
Marcel Holtmann15a49cc2014-07-12 23:20:50 +02001698 if (hdev->commands[25] & 0x40) {
1699 /* Read LE Advertising Channel TX Power */
1700 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1701 }
1702
Johan Hedberg42c6b122013-03-05 20:37:49 +02001703 hci_set_le_support(req);
Andre Guedes9193c6e2014-07-01 18:10:09 -03001704 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001705
1706 /* Read features beyond page 1 if available */
1707 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1708 struct hci_cp_read_local_ext_features cp;
1709
1710 cp.page = p;
1711 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1712 sizeof(cp), &cp);
1713 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001714}
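/* Editor's note on the loop above: with HCI_MAX_PAGES equal to 3, a
 * controller reporting max_page = 2 triggers exactly one extra Read
 * Local Extended Features round (page 2); higher pages are never
 * requested.
 */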
1715
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001716static void hci_init4_req(struct hci_request *req, unsigned long opt)
1717{
1718 struct hci_dev *hdev = req->hdev;
1719
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001720 /* Set event mask page 2 if the HCI command for it is supported */
1721 if (hdev->commands[22] & 0x04)
1722 hci_set_event_mask_page_2(req);
1723
Marcel Holtmann109e3192014-07-23 19:24:56 +02001724 /* Read local codec list if the HCI command is supported */
1725 if (hdev->commands[29] & 0x20)
1726 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1727
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001728 /* Get MWS transport configuration if the HCI command is supported */
1729 if (hdev->commands[30] & 0x08)
1730 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1731
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001732 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001733 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001734 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001735
1736 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001737 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001738 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001739 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1740 u8 support = 0x01;
1741 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1742 sizeof(support), &support);
1743 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001744}
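/* Editor's sketch, not part of the original: the hdev->commands[]
 * tests in the init stages above index the Read Local Supported
 * Commands bitmap as octet/bit pairs. A hypothetical helper would
 * make the pairing explicit, e.g. octet 29 bit 5 (0x20) guards the
 * Read Local Supported Codecs command above.
 */
static inline bool hci_cmd_supported(struct hci_dev *hdev,
				     unsigned int octet, unsigned int bit)
{
	return !!(hdev->commands[octet] & (1 << bit));
}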
1745
Johan Hedberg2177bab2013-03-05 20:37:43 +02001746static int __hci_init(struct hci_dev *hdev)
1747{
1748 int err;
1749
1750 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1751 if (err < 0)
1752 return err;
1753
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001754 /* The Device Under Test (DUT) mode is special and available for
1755 * all controller types. So just create it early on.
1756 */
1757 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1758 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1759 &dut_mode_fops);
1760 }
1761
Johan Hedberg2177bab2013-03-05 20:37:43 +02001762	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1763	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
1764	 * the first stage init.
1765 */
1766 if (hdev->dev_type != HCI_BREDR)
1767 return 0;
1768
1769 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1770 if (err < 0)
1771 return err;
1772
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001773 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1774 if (err < 0)
1775 return err;
1776
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001777 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1778 if (err < 0)
1779 return err;
1780
1781 /* Only create debugfs entries during the initial setup
1782 * phase and not every time the controller gets powered on.
1783 */
1784 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1785 return 0;
1786
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001787 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1788 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001789 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1790 &hdev->manufacturer);
1791 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1792 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001793 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1794 &device_list_fops);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001795 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1796 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001797 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1798
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001799 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1800 &conn_info_min_age_fops);
1801 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1802 &conn_info_max_age_fops);
1803
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001804 if (lmp_bredr_capable(hdev)) {
1805 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1806 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001807 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1808 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001809 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1810 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001811 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1812 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001813 }
1814
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001815 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001816 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1817 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001818 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1819 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001820 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1821 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001822 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001823
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001824 if (lmp_sniff_capable(hdev)) {
1825 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1826 hdev, &idle_timeout_fops);
1827 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1828 hdev, &sniff_min_interval_fops);
1829 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1830 hdev, &sniff_max_interval_fops);
1831 }
1832
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001833 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001834 debugfs_create_file("identity", 0400, hdev->debugfs,
1835 hdev, &identity_fops);
1836 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1837 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001838 debugfs_create_file("random_address", 0444, hdev->debugfs,
1839 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001840 debugfs_create_file("static_address", 0444, hdev->debugfs,
1841 hdev, &static_address_fops);
1842
1843 /* For controllers with a public address, provide a debug
1844 * option to force the usage of the configured static
1845 * address. By default the public address is used.
1846 */
1847 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1848 debugfs_create_file("force_static_address", 0644,
1849 hdev->debugfs, hdev,
1850 &force_static_address_fops);
1851
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001852 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1853 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001854 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1855 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001856 debugfs_create_file("identity_resolving_keys", 0400,
1857 hdev->debugfs, hdev,
1858 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001859 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1860 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001861 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1862 hdev, &conn_min_interval_fops);
1863 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1864 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001865 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1866 hdev, &conn_latency_fops);
Marcel Holtmannf1649572014-06-30 12:34:38 +02001867 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1868 hdev, &supervision_timeout_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001869 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1870 hdev, &adv_channel_map_fops);
Georg Lukas729a1052014-07-26 13:59:58 +02001871 debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
1872 hdev, &adv_min_interval_fops);
1873 debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
1874 hdev, &adv_max_interval_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001875 debugfs_create_u16("discov_interleaved_timeout", 0644,
1876 hdev->debugfs,
1877 &hdev->discov_interleaved_timeout);
Johan Hedberg54506912014-08-08 09:32:51 +03001878
Johan Hedberg711eafe2014-08-08 09:32:52 +03001879 smp_register(hdev);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001880 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001881
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001882 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001883}
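/* Editor's note: each init stage above follows the same synchronous
 * request pattern from this file, e.g.
 *
 *	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
 *
 * where the stage callback queues commands with hci_req_add() and
 * __hci_req_sync() blocks until the controller has answered them all
 * or the timeout fires.
 */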
1884
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001885static void hci_init0_req(struct hci_request *req, unsigned long opt)
1886{
1887 struct hci_dev *hdev = req->hdev;
1888
1889 BT_DBG("%s %ld", hdev->name, opt);
1890
1891 /* Reset */
1892 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1893 hci_reset_req(req, 0);
1894
1895 /* Read Local Version */
1896 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1897
1898 /* Read BD Address */
1899 if (hdev->set_bdaddr)
1900 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1901}
1902
1903static int __hci_unconf_init(struct hci_dev *hdev)
1904{
1905 int err;
1906
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001907 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908 return 0;
1909
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001910 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1911 if (err < 0)
1912 return err;
1913
1914 return 0;
1915}
1916
Johan Hedberg42c6b122013-03-05 20:37:49 +02001917static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918{
1919 __u8 scan = opt;
1920
Johan Hedberg42c6b122013-03-05 20:37:49 +02001921 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
1923 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001924 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925}
1926
Johan Hedberg42c6b122013-03-05 20:37:49 +02001927static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928{
1929 __u8 auth = opt;
1930
Johan Hedberg42c6b122013-03-05 20:37:49 +02001931 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
1933 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001934 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935}
1936
Johan Hedberg42c6b122013-03-05 20:37:49 +02001937static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938{
1939 __u8 encrypt = opt;
1940
Johan Hedberg42c6b122013-03-05 20:37:49 +02001941 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001943 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001944 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945}
1946
Johan Hedberg42c6b122013-03-05 20:37:49 +02001947static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001948{
1949 __le16 policy = cpu_to_le16(opt);
1950
Johan Hedberg42c6b122013-03-05 20:37:49 +02001951 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001952
1953 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001954 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001955}
1956
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001957/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958 * Device is held on return. */
1959struct hci_dev *hci_dev_get(int index)
1960{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001961 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
1963 BT_DBG("%d", index);
1964
1965 if (index < 0)
1966 return NULL;
1967
1968 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001969 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 if (d->id == index) {
1971 hdev = hci_dev_hold(d);
1972 break;
1973 }
1974 }
1975 read_unlock(&hci_dev_list_lock);
1976 return hdev;
1977}
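/* Usage sketch (editor's illustration): the reference taken by
 * hci_dev_get() must be balanced with hci_dev_put(), as the ioctl
 * helpers below do on their done/exit paths:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */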
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978
1979/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001980
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001981bool hci_discovery_active(struct hci_dev *hdev)
1982{
1983 struct discovery_state *discov = &hdev->discovery;
1984
Andre Guedes6fbe1952012-02-03 17:47:58 -03001985 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001986 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001987 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001988 return true;
1989
Andre Guedes6fbe1952012-02-03 17:47:58 -03001990 default:
1991 return false;
1992 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001993}
1994
Johan Hedbergff9ef572012-01-04 14:23:45 +02001995void hci_discovery_set_state(struct hci_dev *hdev, int state)
1996{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001997 int old_state = hdev->discovery.state;
1998
Johan Hedbergff9ef572012-01-04 14:23:45 +02001999 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2000
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002001 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002002 return;
2003
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002004 hdev->discovery.state = state;
2005
Johan Hedbergff9ef572012-01-04 14:23:45 +02002006 switch (state) {
2007 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002008 hci_update_background_scan(hdev);
2009
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002010 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002011 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002012 break;
2013 case DISCOVERY_STARTING:
2014 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002015 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002016 mgmt_discovering(hdev, 1);
2017 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002018 case DISCOVERY_RESOLVING:
2019 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002020 case DISCOVERY_STOPPING:
2021 break;
2022 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002023}
2024
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002025void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026{
Johan Hedberg30883512012-01-04 14:16:21 +02002027 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002028 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029
Johan Hedberg561aafb2012-01-04 13:31:59 +02002030 list_for_each_entry_safe(p, n, &cache->all, all) {
2031 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002032 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002034
2035 INIT_LIST_HEAD(&cache->unknown);
2036 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037}
2038
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002039struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2040 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041{
Johan Hedberg30883512012-01-04 14:16:21 +02002042 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 struct inquiry_entry *e;
2044
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002045 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Johan Hedberg561aafb2012-01-04 13:31:59 +02002047 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002049 return e;
2050 }
2051
2052 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053}
2054
Johan Hedberg561aafb2012-01-04 13:31:59 +02002055struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002056 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002057{
Johan Hedberg30883512012-01-04 14:16:21 +02002058 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002059 struct inquiry_entry *e;
2060
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002061 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002062
2063 list_for_each_entry(e, &cache->unknown, list) {
2064 if (!bacmp(&e->data.bdaddr, bdaddr))
2065 return e;
2066 }
2067
2068 return NULL;
2069}
2070
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002071struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002072 bdaddr_t *bdaddr,
2073 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002074{
2075 struct discovery_state *cache = &hdev->discovery;
2076 struct inquiry_entry *e;
2077
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002078 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002079
2080 list_for_each_entry(e, &cache->resolve, list) {
2081 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2082 return e;
2083 if (!bacmp(&e->data.bdaddr, bdaddr))
2084 return e;
2085 }
2086
2087 return NULL;
2088}
2089
Johan Hedberga3d4e202012-01-09 00:53:02 +02002090void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002091 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002092{
2093 struct discovery_state *cache = &hdev->discovery;
2094 struct list_head *pos = &cache->resolve;
2095 struct inquiry_entry *p;
2096
2097 list_del(&ie->list);
2098
2099 list_for_each_entry(p, &cache->resolve, list) {
2100 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002101 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002102 break;
2103 pos = &p->list;
2104 }
2105
2106 list_add(&ie->list, pos);
2107}
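/* Worked example (editor's illustration, RSSI values assumed): with
 * the resolve list holding entries at -40, -60 and -80 dBm, re-adding
 * an entry with -70 dBm stops the walk at the -80 entry
 * (abs(-80) >= abs(-70)) and inserts it after -60, keeping the list
 * ordered strongest-signal-first for name resolution.
 */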
2108
Marcel Holtmannaf589252014-07-01 14:11:20 +02002109u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2110 bool name_known)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111{
Johan Hedberg30883512012-01-04 14:16:21 +02002112 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002113 struct inquiry_entry *ie;
Marcel Holtmannaf589252014-07-01 14:11:20 +02002114 u32 flags = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002116 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
Szymon Janc2b2fec42012-11-20 11:38:54 +01002118 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2119
Marcel Holtmannaf589252014-07-01 14:11:20 +02002120 if (!data->ssp_mode)
2121 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002122
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002123 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02002124 if (ie) {
Marcel Holtmannaf589252014-07-01 14:11:20 +02002125 if (!ie->data.ssp_mode)
2126 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02002127
Johan Hedberga3d4e202012-01-09 00:53:02 +02002128 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002129 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02002130 ie->data.rssi = data->rssi;
2131 hci_inquiry_cache_update_resolve(hdev, ie);
2132 }
2133
Johan Hedberg561aafb2012-01-04 13:31:59 +02002134 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02002135 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002136
Johan Hedberg561aafb2012-01-04 13:31:59 +02002137 /* Entry not in the cache. Add new one. */
Johan Hedberg27f70f32014-07-21 10:50:06 +03002138 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
Marcel Holtmannaf589252014-07-01 14:11:20 +02002139 if (!ie) {
2140 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2141 goto done;
2142 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002143
2144 list_add(&ie->all, &cache->all);
2145
2146 if (name_known) {
2147 ie->name_state = NAME_KNOWN;
2148 } else {
2149 ie->name_state = NAME_NOT_KNOWN;
2150 list_add(&ie->list, &cache->unknown);
2151 }
2152
2153update:
2154 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002155 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02002156 ie->name_state = NAME_KNOWN;
2157 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158 }
2159
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002160 memcpy(&ie->data, data, sizeof(*data));
2161 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02002163
2164 if (ie->name_state == NAME_NOT_KNOWN)
Marcel Holtmannaf589252014-07-01 14:11:20 +02002165 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
Johan Hedberg31754052012-01-04 13:39:52 +02002166
Marcel Holtmannaf589252014-07-01 14:11:20 +02002167done:
2168 return flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169}
2170
2171static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2172{
Johan Hedberg30883512012-01-04 14:16:21 +02002173 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 struct inquiry_info *info = (struct inquiry_info *) buf;
2175 struct inquiry_entry *e;
2176 int copied = 0;
2177
Johan Hedberg561aafb2012-01-04 13:31:59 +02002178 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002180
2181 if (copied >= num)
2182 break;
2183
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 bacpy(&info->bdaddr, &data->bdaddr);
2185 info->pscan_rep_mode = data->pscan_rep_mode;
2186 info->pscan_period_mode = data->pscan_period_mode;
2187 info->pscan_mode = data->pscan_mode;
2188 memcpy(info->dev_class, data->dev_class, 3);
2189 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002192 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 }
2194
2195 BT_DBG("cache %p, copied %d", cache, copied);
2196 return copied;
2197}
2198
Johan Hedberg42c6b122013-03-05 20:37:49 +02002199static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200{
2201 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002202 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 struct hci_cp_inquiry cp;
2204
2205 BT_DBG("%s", hdev->name);
2206
2207 if (test_bit(HCI_INQUIRY, &hdev->flags))
2208 return;
2209
2210 /* Start Inquiry */
2211 memcpy(&cp.lap, &ir->lap, 3);
2212 cp.length = ir->length;
2213 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002214 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215}
2216
2217int hci_inquiry(void __user *arg)
2218{
2219 __u8 __user *ptr = arg;
2220 struct hci_inquiry_req ir;
2221 struct hci_dev *hdev;
2222 int err = 0, do_inquiry = 0, max_rsp;
2223 long timeo;
2224 __u8 *buf;
2225
2226 if (copy_from_user(&ir, ptr, sizeof(ir)))
2227 return -EFAULT;
2228
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002229 hdev = hci_dev_get(ir.dev_id);
2230 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 return -ENODEV;
2232
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002233 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2234 err = -EBUSY;
2235 goto done;
2236 }
2237
Marcel Holtmann4a964402014-07-02 19:10:33 +02002238 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002239 err = -EOPNOTSUPP;
2240 goto done;
2241 }
2242
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002243 if (hdev->dev_type != HCI_BREDR) {
2244 err = -EOPNOTSUPP;
2245 goto done;
2246 }
2247
Johan Hedberg56f87902013-10-02 13:43:13 +03002248 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2249 err = -EOPNOTSUPP;
2250 goto done;
2251 }
2252
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002253 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002254 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002255 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002256 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 do_inquiry = 1;
2258 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002259 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Marcel Holtmann04837f62006-07-03 10:02:33 +02002261 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002262
2263 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002264 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2265 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002266 if (err < 0)
2267 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002268
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2271 */
NeilBrown74316202014-07-07 15:16:04 +10002272 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002273 TASK_INTERRUPTIBLE))
2274 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002275 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002277	/* For an unlimited number of responses, use a buffer with
2278	 * 255 entries.
2279 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2281
2282	/* cache_dump can't sleep. Therefore we allocate a temporary
2283	 * buffer and then copy it to user space.
2284 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002285 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002286 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 err = -ENOMEM;
2288 goto done;
2289 }
2290
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002291 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002293 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 BT_DBG("num_rsp %d", ir.num_rsp);
2296
2297 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2298 ptr += sizeof(ir);
2299 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002300 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002302 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 err = -EFAULT;
2304
2305 kfree(buf);
2306
2307done:
2308 hci_dev_put(hdev);
2309 return err;
2310}
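/* Userspace usage sketch (editor's illustration, not from this file):
 * the request handled above typically arrives via the HCIINQUIRY
 * ioctl on a raw HCI socket, e.g.
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;	   // 8 * 1.28s inquiry window
 *	buf.ir.num_rsp = 0;	   // unlimited, capped at 255 above
 *	buf.ir.lap[0]  = 0x33;	   // GIAC 0x9e8b33, little-endian
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	ioctl(hci_sock_fd, HCIINQUIRY, &buf);
 */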
2311
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002312static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314 int ret = 0;
2315
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 BT_DBG("%s %p", hdev->name, hdev);
2317
2318 hci_req_lock(hdev);
2319
Johan Hovold94324962012-03-15 14:48:41 +01002320 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2321 ret = -ENODEV;
2322 goto done;
2323 }
2324
Marcel Holtmannd603b762014-07-06 12:11:14 +02002325 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2329 */
2330 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2331 ret = -ERFKILL;
2332 goto done;
2333 }
2334
2335	/* Check for a valid public address or a configured static
2336	 * random address, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2338 * or not.
2339 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2342 * available.
2343 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2346 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002347 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002349 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351 ret = -EADDRNOTAVAIL;
2352 goto done;
2353 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002354 }
2355
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356 if (test_bit(HCI_UP, &hdev->flags)) {
2357 ret = -EALREADY;
2358 goto done;
2359 }
2360
Linus Torvalds1da177e2005-04-16 15:20:36 -07002361 if (hdev->open(hdev)) {
2362 ret = -EIO;
2363 goto done;
2364 }
2365
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002366 atomic_set(&hdev->cmd_cnt, 1);
2367 set_bit(HCI_INIT, &hdev->flags);
2368
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002369 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2370 if (hdev->setup)
2371 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002372
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2375 *
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2378 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002381 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002382
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2386 *
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2390 */
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002393 }
2394
Marcel Holtmann9713c172014-07-06 12:11:15 +02002395 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2399 * on procedure.
2400 */
2401 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2402 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002403 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2404 else
2405 ret = -EADDRNOTAVAIL;
2406 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002407
2408 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002409 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002410 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002411 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002412 }
2413
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002414 clear_bit(HCI_INIT, &hdev->flags);
2415
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 if (!ret) {
2417 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002418 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 set_bit(HCI_UP, &hdev->flags);
2420 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002421 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002422 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002423 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002424 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002425 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002426 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002427 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002428 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002429 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002430 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002431 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002432 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002433 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002434 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002435
2436 skb_queue_purge(&hdev->cmd_q);
2437 skb_queue_purge(&hdev->rx_q);
2438
2439 if (hdev->flush)
2440 hdev->flush(hdev);
2441
2442 if (hdev->sent_cmd) {
2443 kfree_skb(hdev->sent_cmd);
2444 hdev->sent_cmd = NULL;
2445 }
2446
2447 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002448 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 }
2450
2451done:
2452 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 return ret;
2454}
2455
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002456/* ---- HCI ioctl helpers ---- */
2457
2458int hci_dev_open(__u16 dev)
2459{
2460 struct hci_dev *hdev;
2461 int err;
2462
2463 hdev = hci_dev_get(dev);
2464 if (!hdev)
2465 return -ENODEV;
2466
Marcel Holtmann4a964402014-07-02 19:10:33 +02002467 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002468 * up as user channel. Trying to bring them up as normal devices
2469	 * will result in a failure. Only user channel operation is
2470 * possible.
2471 *
2472 * When this function is called for a user channel, the flag
2473 * HCI_USER_CHANNEL will be set first before attempting to
2474 * open the device.
2475 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002476 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2478 err = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
Johan Hedberge1d08f42013-10-01 22:44:50 +03002482 /* We need to ensure that no other power on/off work is pending
2483 * before proceeding to call hci_dev_do_open. This is
2484 * particularly important if the setup procedure has not yet
2485 * completed.
2486 */
2487 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2488 cancel_delayed_work(&hdev->power_off);
2489
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002490 /* After this call it is guaranteed that the setup procedure
2491 * has finished. This means that error conditions like RFKILL
2492 * or no valid public or static random address apply.
2493 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002494 flush_workqueue(hdev->req_workqueue);
2495
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002496	/* For controllers that do not use the management interface and
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002497	 * are brought up via the legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002498 * so that pairing works for them. Once the management interface
2499 * is in use this bit will be cleared again and userspace has
2500 * to explicitly enable it.
2501 */
2502 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2503 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002504 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002505
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002506 err = hci_dev_do_open(hdev);
2507
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002508done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002509 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002510 return err;
2511}
2512
Johan Hedbergd7347f32014-07-04 12:37:23 +03002513/* This function requires the caller to hold hdev->lock */
2514static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2515{
2516 struct hci_conn_params *p;
2517
Johan Hedbergf161dd42014-08-15 21:06:54 +03002518 list_for_each_entry(p, &hdev->le_conn_params, list) {
2519 if (p->conn) {
2520 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002521 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002522 p->conn = NULL;
2523 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002524 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002525 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002526
2527 BT_DBG("All LE pending actions cleared");
2528}
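/* Editor's note on the pairing above: hci_conn_drop() releases the
 * usage hold on the connection while hci_conn_put() drops the
 * reference pinned through p->conn; both are needed so the
 * auto-connection bookkeeping does not leak the hci_conn object.
 */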
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530static int hci_dev_do_close(struct hci_dev *hdev)
2531{
2532 BT_DBG("%s %p", hdev->name, hdev);
2533
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002534 cancel_delayed_work(&hdev->power_off);
2535
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 hci_req_cancel(hdev, ENODEV);
2537 hci_req_lock(hdev);
2538
2539 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002540 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 hci_req_unlock(hdev);
2542 return 0;
2543 }
2544
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002545	/* Flush RX and TX work items */
2546 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002547 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002549 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002550 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002551 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002552 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002553 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002554 }
2555
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002556 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002557 cancel_delayed_work(&hdev->service_cache);
2558
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002559 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002560
2561 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2562 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002563
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002564 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002565 hci_inquiry_cache_flush(hdev);
Johan Hedbergd7347f32014-07-04 12:37:23 +03002566 hci_pend_le_actions_clear(hdev);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002567 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002568 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
2570 hci_notify(hdev, HCI_DEV_DOWN);
2571
2572 if (hdev->flush)
2573 hdev->flush(hdev);
2574
2575 /* Reset device */
2576 skb_queue_purge(&hdev->cmd_q);
2577 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmann4a964402014-07-02 19:10:33 +02002578 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2579 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002580 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002582 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 clear_bit(HCI_INIT, &hdev->flags);
2584 }
2585
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002586 /* flush cmd work */
2587 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588
2589 /* Drop queues */
2590 skb_queue_purge(&hdev->rx_q);
2591 skb_queue_purge(&hdev->cmd_q);
2592 skb_queue_purge(&hdev->raw_q);
2593
2594 /* Drop last sent command */
2595 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002596 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 kfree_skb(hdev->sent_cmd);
2598 hdev->sent_cmd = NULL;
2599 }
2600
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002601 kfree_skb(hdev->recv_evt);
2602 hdev->recv_evt = NULL;
2603
Linus Torvalds1da177e2005-04-16 15:20:36 -07002604 /* After this point our queues are empty
2605 * and no tasks are scheduled. */
2606 hdev->close(hdev);
2607
Johan Hedberg35b973c2013-03-15 17:06:59 -05002608 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002609 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002610 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2611
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002612 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2613 if (hdev->dev_type == HCI_BREDR) {
2614 hci_dev_lock(hdev);
2615 mgmt_powered(hdev, 0);
2616 hci_dev_unlock(hdev);
2617 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002618 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002619
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002620 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002621 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002622
Johan Hedberge59fda82012-02-22 18:11:53 +02002623 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002624 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002625 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002626
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 hci_req_unlock(hdev);
2628
2629 hci_dev_put(hdev);
2630 return 0;
2631}
2632
2633int hci_dev_close(__u16 dev)
2634{
2635 struct hci_dev *hdev;
2636 int err;
2637
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002638 hdev = hci_dev_get(dev);
2639 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002641
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002642 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2643 err = -EBUSY;
2644 goto done;
2645 }
2646
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002647 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2648 cancel_delayed_work(&hdev->power_off);
2649
Linus Torvalds1da177e2005-04-16 15:20:36 -07002650 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002651
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002652done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 hci_dev_put(hdev);
2654 return err;
2655}
2656
2657int hci_dev_reset(__u16 dev)
2658{
2659 struct hci_dev *hdev;
2660 int ret = 0;
2661
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002662 hdev = hci_dev_get(dev);
2663 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664 return -ENODEV;
2665
2666 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
Marcel Holtmann808a0492013-08-26 20:57:58 -07002668 if (!test_bit(HCI_UP, &hdev->flags)) {
2669 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002671 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002673 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2674 ret = -EBUSY;
2675 goto done;
2676 }
2677
Marcel Holtmann4a964402014-07-02 19:10:33 +02002678 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002679 ret = -EOPNOTSUPP;
2680 goto done;
2681 }
2682
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683 /* Drop queues */
2684 skb_queue_purge(&hdev->rx_q);
2685 skb_queue_purge(&hdev->cmd_q);
2686
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002687 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002688 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002690 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
2692 if (hdev->flush)
2693 hdev->flush(hdev);
2694
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002695 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002696 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002698 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
2700done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002701 hci_req_unlock(hdev);
2702 hci_dev_put(hdev);
2703 return ret;
2704}
2705
2706int hci_dev_reset_stat(__u16 dev)
2707{
2708 struct hci_dev *hdev;
2709 int ret = 0;
2710
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002711 hdev = hci_dev_get(dev);
2712 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 return -ENODEV;
2714
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002715 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2716 ret = -EBUSY;
2717 goto done;
2718 }
2719
Marcel Holtmann4a964402014-07-02 19:10:33 +02002720 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002721 ret = -EOPNOTSUPP;
2722 goto done;
2723 }
2724
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2726
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002727done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 return ret;
2730}
2731
Johan Hedberg123abc02014-07-10 12:09:07 +03002732static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2733{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002734 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002735
2736 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2737
2738 if ((scan & SCAN_PAGE))
2739 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2740 &hdev->dev_flags);
2741 else
2742 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2743 &hdev->dev_flags);
2744
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002745 if ((scan & SCAN_INQUIRY)) {
2746 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2747 &hdev->dev_flags);
2748 } else {
2749 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2750 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2751 &hdev->dev_flags);
2752 }
2753
Johan Hedberg123abc02014-07-10 12:09:07 +03002754 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755 return;
2756
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002757 if (conn_changed || discov_changed) {
2758 /* In case this was disabled through mgmt */
2759 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2760
2761 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2762 mgmt_update_adv_data(hdev);
2763
Johan Hedberg123abc02014-07-10 12:09:07 +03002764 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002765 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002766}
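/* Worked example (editor's illustration): the scan values handled
 * above come straight from HCISETSCAN's dr.dev_opt, i.e. SCAN_DISABLED
 * (0x00), SCAN_INQUIRY (0x01), SCAN_PAGE (0x02) or
 * SCAN_PAGE | SCAN_INQUIRY (0x03); 0x03 marks the device both
 * connectable and discoverable.
 */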
2767
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

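/* Usage sketch, kept out of the build: for HCISETACLMTU and HCISETSCOMTU
 * the 32-bit dev_opt carries two 16-bit values, mirroring the pointer
 * arithmetic above: the half at the higher address holds the MTU and the
 * half at the lower address holds the packet count (so on little-endian
 * machines the MTU ends up in the upper 16 bits). The helper below is a
 * hypothetical illustration of that packing, not an existing API.
 */
#if 0
static __u32 example_pack_mtu_opt(__u16 mtu, __u16 pkts)
{
	__u32 dev_opt = 0;

	*((__u16 *) &dev_opt + 1) = mtu;	/* read back as acl_mtu */
	*((__u16 *) &dev_opt + 0) = pkts;	/* read back as acl_pkts */

	return dev_opt;
}
#endif
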
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When auto-off is configured the transport is actually
		 * running, but the device should still be reported to
		 * userspace as being down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

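/* Usage sketch, kept out of the build: hci_get_dev_list() backs the
 * HCIGETDEVLIST ioctl and expects a buffer that starts with dev_num
 * followed by dev_num struct hci_dev_req slots. The fragment below shows
 * the matching userspace side (not kernel code, purely illustrative):
 */
#if 0
	struct hci_dev_list_req *dl;
	int i, ctl = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);

	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
	dl->dev_num = HCI_MAX_DEV;

	if (ctl >= 0 && ioctl(ctl, HCIGETDEVLIST, dl) == 0)
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
			       dl->dev_req[i].dev_opt);
#endif
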
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When auto-off is configured the transport is actually
	 * running, but the device should still be reported to
	 * userspace as being down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Now that the controller is configured, it is
		 * important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

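/* Summary of the persistence rules checked above, in evaluation order
 * (0xff as old_key_type means "no previous key"):
 *
 *	legacy key (type < 0x03)                   -> stored
 *	debug combination key                      -> not stored
 *	changed combination key, no previous key   -> not stored
 *	no connection (security mode 3)            -> stored
 *	both sides beyond no-bonding               -> stored
 *	dedicated bonding on either side           -> stored
 *	anything else                              -> not stored
 */
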
static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_role(k->type) != role)
			continue;

		rcu_read_unlock();
		return k;
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

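/* The lookup above is deliberately two-pass: the first loop only compares
 * against each IRK's cached RPA, which is cheap, while the second falls
 * back to the AES-based smp_irk_matches() computation and caches any hit.
 * A minimal caller sketch, kept out of the build ("peer_rpa" would be the
 * resolvable private address from an advertising report):
 */
#if 0
static void example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *peer_rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_rpa(hdev, peer_rpa);
	if (irk)
		BT_DBG("RPA %pMR resolved to identity %pMR", peer_rpa,
		       &irk->bdaddr);
}
#endif
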
struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

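/* Usage sketch, kept out of the build: the same helpers back
 * hdev->blacklist, hdev->whitelist and hdev->le_white_list, so kernel-side
 * bookkeeping for the LE white list looks like this (pushing the entries
 * to the controller is a separate step). "peer" and "peer_type" are
 * hypothetical. Note that BDADDR_ANY is rejected on add and acts as
 * "clear everything" on del.
 */
#if 0
static void example_white_list(struct hci_dev *hdev, bdaddr_t *peer,
			       u8 peer_type)
{
	hci_dev_lock(hdev);

	if (hci_bdaddr_list_add(&hdev->le_white_list, peer, peer_type) ==
	    -EEXIST)
		BT_DBG("%pMR already on the list", peer);

	/* remove every entry at once */
	hci_bdaddr_list_del(&hdev->le_white_list, BDADDR_ANY, 0);

	hci_dev_unlock(hdev);
}
#endif
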
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	INIT_LIST_HEAD(&params->action);

	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}

/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	if (params->auto_connect == auto_connect)
		return 0;

	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}

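/* Usage sketch, kept out of the build: this is how mgmt Add Device style
 * handling would arm autonomous reconnection for a peer, so that the
 * background scan connects as soon as the device starts advertising.
 * "peer" and "peer_type" are hypothetical identity address values.
 */
#if 0
static void example_auto_connect(struct hci_dev *hdev, bdaddr_t *peer,
				 u8 peer_type)
{
	hci_dev_lock(hdev);
	if (hci_conn_params_set(hdev, peer, peer_type,
				HCI_AUTO_CONN_ALWAYS) < 0)
		BT_ERR("Failed to set connection parameters");
	hci_dev_unlock(hdev);
}
#endif
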
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_disabled(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
			continue;
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE disabled connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear_all(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
		hci_conn_params_free(params);

	hci_update_background_scan(hdev);

	BT_DBG("All LE connection parameters were removed");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address, use the static address as the random address (but
	 * skip the HCI command if the current random address is already
	 * the static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

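/* Summary of the own-address selection above, in priority order:
 *
 *	1. HCI_PRIVACY set                 -> resolvable private address,
 *	                                      regenerated once expired
 *	2. privacy off, require_privacy    -> fresh unresolvable private
 *	                                      address
 *	3. forced static address or no     -> static random address
 *	   public address
 *	4. otherwise                       -> public address
 */
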
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

Linus Torvalds1da177e2005-04-16 15:20:36 -07004053/* Register HCI device */
4054int hci_register_dev(struct hci_dev *hdev)
4055{
David Herrmannb1b813d2012-04-22 14:39:58 +02004056 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004057
Marcel Holtmann74292d52014-07-06 15:50:27 +02004058 if (!hdev->open || !hdev->close || !hdev->send)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 return -EINVAL;
4060
Mat Martineau08add512011-11-02 16:18:36 -07004061 /* Do not allow HCI_AMP devices to register at index 0,
4062 * so the index can be used as the AMP controller ID.
4063 */
Sasha Levin3df92b32012-05-27 22:36:56 +02004064 switch (hdev->dev_type) {
4065 case HCI_BREDR:
4066 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4067 break;
4068 case HCI_AMP:
4069 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4070 break;
4071 default:
4072 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004074
Sasha Levin3df92b32012-05-27 22:36:56 +02004075 if (id < 0)
4076 return id;
4077
Linus Torvalds1da177e2005-04-16 15:20:36 -07004078 sprintf(hdev->name, "hci%d", id);
4079 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03004080
4081 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4082
Kees Cookd8537542013-07-03 15:04:57 -07004083 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4084 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02004085 if (!hdev->workqueue) {
4086 error = -ENOMEM;
4087 goto err;
4088 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004089
Kees Cookd8537542013-07-03 15:04:57 -07004090 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4091 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004092 if (!hdev->req_workqueue) {
4093 destroy_workqueue(hdev->workqueue);
4094 error = -ENOMEM;
4095 goto err;
4096 }
4097
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004098 if (!IS_ERR_OR_NULL(bt_debugfs))
4099 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4100
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004101 dev_set_name(&hdev->dev, "%s", hdev->name);
4102
4103 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02004104 if (error < 0)
Johan Hedberg54506912014-08-08 09:32:51 +03004105 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004107 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004108 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4109 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004110 if (hdev->rfkill) {
4111 if (rfkill_register(hdev->rfkill) < 0) {
4112 rfkill_destroy(hdev->rfkill);
4113 hdev->rfkill = NULL;
4114 }
4115 }
4116
Johan Hedberg5e130362013-09-13 08:58:17 +03004117 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4118 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4119
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02004120 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07004121 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004122
Marcel Holtmann01cd3402013-10-06 01:16:22 -07004123 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03004124 /* Assume BR/EDR support until proven otherwise (such as
4125		 * through reading supported features during init).
4126 */
4127 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4128 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03004129
Gustavo Padovanfcee3372013-07-11 11:34:28 +01004130 write_lock(&hci_dev_list_lock);
4131 list_add(&hdev->list, &hci_dev_list);
4132 write_unlock(&hci_dev_list_lock);
4133
Marcel Holtmann4a964402014-07-02 19:10:33 +02004134 /* Devices that are marked for raw-only usage are unconfigured
4135 * and should not be included in normal operation.
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004136 */
4137 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
Marcel Holtmann4a964402014-07-02 19:10:33 +02004138 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004139
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01004141 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142
Johan Hedberg19202572013-01-14 22:33:51 +02004143 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07004144
Linus Torvalds1da177e2005-04-16 15:20:36 -07004145 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004146
David Herrmann33ca9542011-10-08 14:58:49 +02004147err_wqueue:
4148 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004149 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02004150err:
Sasha Levin3df92b32012-05-27 22:36:56 +02004151 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004152
David Herrmann33ca9542011-10-08 14:58:49 +02004153 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004154}
4155EXPORT_SYMBOL(hci_register_dev);
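
/* A minimal driver-side sketch of the registration contract above.
 * This is illustrative only: struct example_transport and the
 * example_* callbacks are assumptions, not part of this file. It
 * shows that hci_register_dev() rejects a device whose open, close
 * or send callbacks are unset, and that on failure the driver still
 * owns the device and must release it with hci_free_dev().
 */
struct example_transport {
	struct hci_dev *hdev;
	/* transport-specific state would live here */
};

static int example_open(struct hci_dev *hdev)
{
	return 0;	/* power up the transport */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* power down the transport */
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* A real driver would queue the skb to the wire; the driver
	 * owns the skb once this callback returns success.
	 */
	kfree_skb(skb);
	return 0;
}

static int example_probe(struct example_transport *priv)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_UART;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;
	hci_set_drvdata(hdev, priv);

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	priv->hdev = hdev;
	return 0;
}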
4156
4157/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02004158void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159{
Sasha Levin3df92b32012-05-27 22:36:56 +02004160 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02004161
Marcel Holtmannc13854c2010-02-08 15:27:07 +01004162 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163
Johan Hovold94324962012-03-15 14:48:41 +01004164 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4165
Sasha Levin3df92b32012-05-27 22:36:56 +02004166 id = hdev->id;
4167
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004168 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004169 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004170 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171
4172 hci_dev_do_close(hdev);
4173
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05304174 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02004175 kfree_skb(hdev->reassembly[i]);
4176
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02004177 cancel_work_sync(&hdev->power_on);
4178
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004179 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02004180 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4181 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004182 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02004183 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004184 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02004185 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02004186
Johan Hedberg2e58ef32011-11-08 20:40:15 +02004187 /* mgmt_index_removed should take care of emptying the
4188 * pending list */
4189 BUG_ON(!list_empty(&hdev->mgmt_pending));
4190
Linus Torvalds1da177e2005-04-16 15:20:36 -07004191 hci_notify(hdev, HCI_DEV_UNREG);
4192
Marcel Holtmann611b30f2009-06-08 14:41:38 +02004193 if (hdev->rfkill) {
4194 rfkill_unregister(hdev->rfkill);
4195 rfkill_destroy(hdev->rfkill);
4196 }
4197
Johan Hedberg711eafe2014-08-08 09:32:52 +03004198 smp_unregister(hdev);
Johan Hedberg99780a72014-02-18 10:40:07 +02004199
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07004200 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08004201
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07004202 debugfs_remove_recursive(hdev->debugfs);
4203
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004204 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02004205 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004206
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004207 hci_dev_lock(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004208 hci_bdaddr_list_clear(&hdev->blacklist);
Johan Hedberg66593582014-07-09 12:59:14 +03004209 hci_bdaddr_list_clear(&hdev->whitelist);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004210 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004211 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004212 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004213 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004214 hci_remote_oob_data_clear(hdev);
Johan Hedbergdcc36c12014-07-09 12:59:13 +03004215 hci_bdaddr_list_clear(&hdev->le_white_list);
Johan Hedberg373110c2014-07-02 17:37:25 +03004216 hci_conn_params_clear_all(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004217 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004218
David Herrmanndc946bd2012-01-07 15:47:24 +01004219 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004220
4221 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222}
4223EXPORT_SYMBOL(hci_unregister_dev);
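
/* The matching removal path for the example_transport sketch above:
 * unregister first so the core closes the device and tears down its
 * state, then drop the driver's reference with hci_free_dev().
 */
static void example_remove(struct example_transport *priv)
{
	hci_unregister_dev(priv->hdev);
	hci_free_dev(priv->hdev);
	priv->hdev = NULL;
}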
4224
4225/* Suspend HCI device */
4226int hci_suspend_dev(struct hci_dev *hdev)
4227{
4228 hci_notify(hdev, HCI_DEV_SUSPEND);
4229 return 0;
4230}
4231EXPORT_SYMBOL(hci_suspend_dev);
4232
4233/* Resume HCI device */
4234int hci_resume_dev(struct hci_dev *hdev)
4235{
4236 hci_notify(hdev, HCI_DEV_RESUME);
4237 return 0;
4238}
4239EXPORT_SYMBOL(hci_resume_dev);
4240
Marcel Holtmann75e05692014-11-02 08:15:38 +01004241/* Reset HCI device */
4242int hci_reset_dev(struct hci_dev *hdev)
4243{
4244 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4245 struct sk_buff *skb;
4246
4247 skb = bt_skb_alloc(3, GFP_ATOMIC);
4248 if (!skb)
4249 return -ENOMEM;
4250
4251 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4252 memcpy(skb_put(skb, 3), hw_err, 3);
4253
4254 /* Send Hardware Error to upper stack */
4255 return hci_recv_frame(hdev, skb);
4256}
4257EXPORT_SYMBOL(hci_reset_dev);
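
/* Hypothetical use of hci_reset_dev() from a driver's error handler:
 * when the transport detects that the controller firmware has wedged,
 * injecting the synthetic Hardware Error event lets the core run its
 * normal reset handling.
 */
static void example_fw_error(struct hci_dev *hdev)
{
	BT_ERR("%s firmware unresponsive, injecting hardware error",
	       hdev->name);
	hci_reset_dev(hdev);
}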
4258
Marcel Holtmann76bca882009-11-18 00:40:39 +01004259/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004260int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004261{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004262 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004263 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004264 kfree_skb(skb);
4265 return -ENXIO;
4266 }
4267
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004268 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004269 bt_cb(skb)->incoming = 1;
4270
4271 /* Time stamp */
4272 __net_timestamp(skb);
4273
Marcel Holtmann76bca882009-11-18 00:40:39 +01004274 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004275 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004276
Marcel Holtmann76bca882009-11-18 00:40:39 +01004277 return 0;
4278}
4279EXPORT_SYMBOL(hci_recv_frame);
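
/* Sketch of a driver RX path built on hci_recv_frame(). The H:4-style
 * framing (packet type in the first byte) and the buffer arguments are
 * assumptions; the point is that the core takes ownership of the skb,
 * so the driver must not touch it after a successful hand-off.
 */
static int example_rx_packet(struct hci_dev *hdev, const u8 *buf, int len)
{
	struct sk_buff *skb;

	if (len < 1)
		return -EILSEQ;

	skb = bt_skb_alloc(len - 1, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = buf[0];
	memcpy(skb_put(skb, len - 1), buf + 1, len - 1);

	return hci_recv_frame(hdev, skb);
}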
4280
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304281static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004282 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304283{
4284 int len = 0;
4285 int hlen = 0;
4286 int remain = count;
4287 struct sk_buff *skb;
4288 struct bt_skb_cb *scb;
4289
4290 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004291 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304292 return -EILSEQ;
4293
4294 skb = hdev->reassembly[index];
4295
4296 if (!skb) {
4297 switch (type) {
4298 case HCI_ACLDATA_PKT:
4299 len = HCI_MAX_FRAME_SIZE;
4300 hlen = HCI_ACL_HDR_SIZE;
4301 break;
4302 case HCI_EVENT_PKT:
4303 len = HCI_MAX_EVENT_SIZE;
4304 hlen = HCI_EVENT_HDR_SIZE;
4305 break;
4306 case HCI_SCODATA_PKT:
4307 len = HCI_MAX_SCO_SIZE;
4308 hlen = HCI_SCO_HDR_SIZE;
4309 break;
4310 }
4311
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004312 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304313 if (!skb)
4314 return -ENOMEM;
4315
4316 scb = (void *) skb->cb;
4317 scb->expect = hlen;
4318 scb->pkt_type = type;
4319
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304320 hdev->reassembly[index] = skb;
4321 }
4322
4323 while (count) {
4324 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004325 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304326
4327 memcpy(skb_put(skb, len), data, len);
4328
4329 count -= len;
4330 data += len;
4331 scb->expect -= len;
4332 remain = count;
4333
4334 switch (type) {
4335 case HCI_EVENT_PKT:
4336 if (skb->len == HCI_EVENT_HDR_SIZE) {
4337 struct hci_event_hdr *h = hci_event_hdr(skb);
4338 scb->expect = h->plen;
4339
4340 if (skb_tailroom(skb) < scb->expect) {
4341 kfree_skb(skb);
4342 hdev->reassembly[index] = NULL;
4343 return -ENOMEM;
4344 }
4345 }
4346 break;
4347
4348 case HCI_ACLDATA_PKT:
4349 if (skb->len == HCI_ACL_HDR_SIZE) {
4350 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4351 scb->expect = __le16_to_cpu(h->dlen);
4352
4353 if (skb_tailroom(skb) < scb->expect) {
4354 kfree_skb(skb);
4355 hdev->reassembly[index] = NULL;
4356 return -ENOMEM;
4357 }
4358 }
4359 break;
4360
4361 case HCI_SCODATA_PKT:
4362 if (skb->len == HCI_SCO_HDR_SIZE) {
4363 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4364 scb->expect = h->dlen;
4365
4366 if (skb_tailroom(skb) < scb->expect) {
4367 kfree_skb(skb);
4368 hdev->reassembly[index] = NULL;
4369 return -ENOMEM;
4370 }
4371 }
4372 break;
4373 }
4374
4375 if (scb->expect == 0) {
4376 /* Complete frame */
4377
4378 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004379 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304380
4381 hdev->reassembly[index] = NULL;
4382 return remain;
4383 }
4384 }
4385
4386 return remain;
4387}
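
/* Worked example of the reassembly state machine above, for an HCI
 * event split across two chunks: the first chunk fills the 2-byte
 * event header, scb->expect is then reloaded from hdr->plen, and the
 * count of unconsumed input bytes (remain) is returned to the caller;
 * once expect drops to zero the completed frame is handed to
 * hci_recv_frame() and the reassembly slot is cleared for the next
 * packet.
 */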
4388
Suraj Sumangala99811512010-07-14 13:02:19 +05304389#define STREAM_REASSEMBLY 0
4390
4391int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4392{
4393 int type;
4394 int rem = 0;
4395
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004396 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304397 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4398
4399 if (!skb) {
4400 struct { char type; } *pkt;
4401
4402 /* Start of the frame */
4403 pkt = data;
4404 type = pkt->type;
4405
4406 data++;
4407 count--;
4408 } else
4409 type = bt_cb(skb)->pkt_type;
4410
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004411 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004412 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304413 if (rem < 0)
4414 return rem;
4415
4416 data += (count - rem);
4417 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004418 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304419
4420 return rem;
4421}
4422EXPORT_SYMBOL(hci_recv_stream_fragment);
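
/* For byte-stream transports such as UARTs the driver does not need
 * to delineate packets itself; it can push raw chunks into the stream
 * reassembler. A hedged sketch (the receive-callback shape is an
 * assumption):
 */
static void example_uart_receive(struct hci_dev *hdev, const u8 *data,
				 int count)
{
	int err;

	err = hci_recv_stream_fragment(hdev, (void *)data, count);
	if (err < 0)
		BT_ERR("%s corrupted stream, frame discarded", hdev->name);
}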
4423
Linus Torvalds1da177e2005-04-16 15:20:36 -07004424/* ---- Interface to upper protocols ---- */
4425
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426int hci_register_cb(struct hci_cb *cb)
4427{
4428 BT_DBG("%p name %s", cb, cb->name);
4429
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004430 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004431 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004432 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004433
4434 return 0;
4435}
4436EXPORT_SYMBOL(hci_register_cb);
4437
4438int hci_unregister_cb(struct hci_cb *cb)
4439{
4440 BT_DBG("%p name %s", cb, cb->name);
4441
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004442 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004444 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445
4446 return 0;
4447}
4448EXPORT_SYMBOL(hci_unregister_cb);
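
/* How an upper protocol hooks into the callback list, as a sketch:
 * only the .name member is filled in since the remaining hci_cb
 * members are optional notification hooks; "example" is a
 * placeholder name.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __init example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}

static void __exit example_proto_exit(void)
{
	hci_unregister_cb(&example_cb);
}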
4449
Marcel Holtmann51086992013-10-10 14:54:19 -07004450static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004452 int err;
4453
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004454 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004456 /* Time stamp */
4457 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004458
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004459 /* Send copy to monitor */
4460 hci_send_to_monitor(hdev, skb);
4461
4462 if (atomic_read(&hdev->promisc)) {
4463 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004464 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004465 }
4466
4467 /* Get rid of skb owner, prior to sending to the driver. */
4468 skb_orphan(skb);
4469
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004470 err = hdev->send(hdev, skb);
4471 if (err < 0) {
4472 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4473 kfree_skb(skb);
4474 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004475}
4476
Johan Hedberg3119ae92013-03-05 20:37:44 +02004477void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4478{
4479 skb_queue_head_init(&req->cmd_q);
4480 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004481 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004482}
4483
4484int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4485{
4486 struct hci_dev *hdev = req->hdev;
4487 struct sk_buff *skb;
4488 unsigned long flags;
4489
4490 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4491
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004492 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004493 * commands queued on the HCI request queue.
4494 */
4495 if (req->err) {
4496 skb_queue_purge(&req->cmd_q);
4497 return req->err;
4498 }
4499
Johan Hedberg3119ae92013-03-05 20:37:44 +02004500 /* Do not allow empty requests */
4501 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004502 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004503
4504 skb = skb_peek_tail(&req->cmd_q);
4505 bt_cb(skb)->req.complete = complete;
4506
4507 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4508 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4509 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4510
4511 queue_work(hdev->workqueue, &hdev->cmd_work);
4512
4513 return 0;
4514}
4515
Marcel Holtmann899de762014-07-11 05:51:58 +02004516bool hci_req_pending(struct hci_dev *hdev)
4517{
4518 return (hdev->req_status == HCI_REQ_PEND);
4519}
4520
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004521static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004522 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523{
4524 int len = HCI_COMMAND_HDR_SIZE + plen;
4525 struct hci_command_hdr *hdr;
4526 struct sk_buff *skb;
4527
Linus Torvalds1da177e2005-04-16 15:20:36 -07004528 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004529 if (!skb)
4530 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531
4532 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004533 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534 hdr->plen = plen;
4535
4536 if (plen)
4537 memcpy(skb_put(skb, plen), param, plen);
4538
4539 BT_DBG("skb len %d", skb->len);
4540
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004541 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004542 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004543
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004544 return skb;
4545}
4546
4547/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004548int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4549 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004550{
4551 struct sk_buff *skb;
4552
4553 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4554
4555 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4556 if (!skb) {
4557 BT_ERR("%s no memory for command", hdev->name);
4558 return -ENOMEM;
4559 }
4560
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004561 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004562 * single-command requests.
4563 */
4564 bt_cb(skb)->req.start = true;
4565
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004567 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004568
4569 return 0;
4570}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004571
Johan Hedberg71c76a12013-03-05 20:37:46 +02004572/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004573void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4574 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004575{
4576 struct hci_dev *hdev = req->hdev;
4577 struct sk_buff *skb;
4578
4579 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4580
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004581 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004582 * queueing the HCI command. We can simply return.
4583 */
4584 if (req->err)
4585 return;
4586
Johan Hedberg71c76a12013-03-05 20:37:46 +02004587 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4588 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004589 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4590 hdev->name, opcode);
4591 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004592 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004593 }
4594
4595 if (skb_queue_empty(&req->cmd_q))
4596 bt_cb(skb)->req.start = true;
4597
Johan Hedberg02350a72013-04-03 21:50:29 +03004598 bt_cb(skb)->req.event = event;
4599
Johan Hedberg71c76a12013-03-05 20:37:46 +02004600 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004601}
4602
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004603void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4604 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004605{
4606 hci_req_add_ev(req, opcode, plen, param, 0);
4607}
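
/* Putting the request helpers together, as a hedged sketch: build the
 * request with hci_req_init()/hci_req_add() and submit it through
 * hci_req_run(). The chosen opcode (Read Local Version) and the
 * callback name are illustrative assumptions.
 */
static void example_req_complete(struct hci_dev *hdev, u16 status)
{
	BT_DBG("%s request completed, status 0x%2.2x", hdev->name, status);
}

static int example_read_version(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	return hci_req_run(&req, example_req_complete);
}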
4608
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004610void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004611{
4612 struct hci_command_hdr *hdr;
4613
4614 if (!hdev->sent_cmd)
4615 return NULL;
4616
4617 hdr = (void *) hdev->sent_cmd->data;
4618
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004619 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004620 return NULL;
4621
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004622 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004623
4624 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4625}
4626
4627/* Send ACL data */
4628static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4629{
4630 struct hci_acl_hdr *hdr;
4631 int len = skb->len;
4632
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004633 skb_push(skb, HCI_ACL_HDR_SIZE);
4634 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004635 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004636 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4637 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004638}
4639
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004640static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004641 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004642{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004643 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004644 struct hci_dev *hdev = conn->hdev;
4645 struct sk_buff *list;
4646
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004647 skb->len = skb_headlen(skb);
4648 skb->data_len = 0;
4649
4650 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004651
4652 switch (hdev->dev_type) {
4653 case HCI_BREDR:
4654 hci_add_acl_hdr(skb, conn->handle, flags);
4655 break;
4656 case HCI_AMP:
4657 hci_add_acl_hdr(skb, chan->handle, flags);
4658 break;
4659 default:
4660 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4661 return;
4662 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004663
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004664 list = skb_shinfo(skb)->frag_list;
4665 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004666		/* Non-fragmented */
4667 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4668
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004669 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004670 } else {
4671 /* Fragmented */
4672 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4673
4674 skb_shinfo(skb)->frag_list = NULL;
4675
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004676 /* Queue all fragments atomically. We need to use spin_lock_bh
4677		 * here because of 6LoWPAN links: on those links this function
4678		 * is called from softirq context, and taking a plain spin lock
4679		 * there could deadlock.
4680 */
4681 spin_lock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004682
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004683 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004684
4685 flags &= ~ACL_START;
4686 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004687 do {
4688 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004689
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004690 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004691 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004692
4693 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4694
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004695 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004696 } while (list);
4697
Jukka Rissanen9cfd5a22014-10-29 10:16:00 +02004698 spin_unlock_bh(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004700}
4701
4702void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4703{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004704 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004705
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004706 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004707
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004708 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004709
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004710 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004711}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004712
4713/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004714void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004715{
4716 struct hci_dev *hdev = conn->hdev;
4717 struct hci_sco_hdr hdr;
4718
4719 BT_DBG("%s len %d", hdev->name, skb->len);
4720
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004721 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722 hdr.dlen = skb->len;
4723
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004724 skb_push(skb, HCI_SCO_HDR_SIZE);
4725 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004726 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004727
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004728 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004729
Linus Torvalds1da177e2005-04-16 15:20:36 -07004730 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004731 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733
4734/* ---- HCI TX task (outgoing data) ---- */
4735
4736/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004737static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4738 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739{
4740 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004741 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004742 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004744	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004745	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004746
4747 rcu_read_lock();
4748
4749 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004750 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004752
4753 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4754 continue;
4755
Linus Torvalds1da177e2005-04-16 15:20:36 -07004756 num++;
4757
4758 if (c->sent < min) {
4759 min = c->sent;
4760 conn = c;
4761 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004762
4763 if (hci_conn_num(hdev, type) == num)
4764 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004765 }
4766
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004767 rcu_read_unlock();
4768
Linus Torvalds1da177e2005-04-16 15:20:36 -07004769 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004770 int cnt, q;
4771
4772 switch (conn->type) {
4773 case ACL_LINK:
4774 cnt = hdev->acl_cnt;
4775 break;
4776 case SCO_LINK:
4777 case ESCO_LINK:
4778 cnt = hdev->sco_cnt;
4779 break;
4780 case LE_LINK:
4781 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4782 break;
4783 default:
4784 cnt = 0;
4785 BT_ERR("Unknown link type");
4786 }
4787
4788 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789 *quote = q ? q : 1;
4790 } else
4791 *quote = 0;
4792
4793 BT_DBG("conn %p quote %d", conn, *quote);
4794 return conn;
4795}
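
/* Worked example of the quota computed above: with three ACL
 * connections carrying queued data (num == 3) and acl_cnt == 8 free
 * controller buffers, q = 8 / 3 = 2, so the least-busy connection may
 * send two packets this round; the "q ? q : 1" fallback guarantees
 * forward progress even when cnt < num.
 */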
4796
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004797static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798{
4799 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004800 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004801
Ville Tervobae1f5d92011-02-10 22:38:53 -03004802 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004803
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004804 rcu_read_lock();
4805
Linus Torvalds1da177e2005-04-16 15:20:36 -07004806 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004807 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004808 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004809 BT_ERR("%s killing stalled connection %pMR",
4810 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004811 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004812 }
4813 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004814
4815 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004816}
4817
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004818static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4819 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004820{
4821 struct hci_conn_hash *h = &hdev->conn_hash;
4822 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004823 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004824 struct hci_conn *conn;
4825 int cnt, q, conn_num = 0;
4826
4827 BT_DBG("%s", hdev->name);
4828
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004829 rcu_read_lock();
4830
4831 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004832 struct hci_chan *tmp;
4833
4834 if (conn->type != type)
4835 continue;
4836
4837 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4838 continue;
4839
4840 conn_num++;
4841
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004842 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004843 struct sk_buff *skb;
4844
4845 if (skb_queue_empty(&tmp->data_q))
4846 continue;
4847
4848 skb = skb_peek(&tmp->data_q);
4849 if (skb->priority < cur_prio)
4850 continue;
4851
4852 if (skb->priority > cur_prio) {
4853 num = 0;
4854 min = ~0;
4855 cur_prio = skb->priority;
4856 }
4857
4858 num++;
4859
4860 if (conn->sent < min) {
4861 min = conn->sent;
4862 chan = tmp;
4863 }
4864 }
4865
4866 if (hci_conn_num(hdev, type) == conn_num)
4867 break;
4868 }
4869
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004870 rcu_read_unlock();
4871
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004872 if (!chan)
4873 return NULL;
4874
4875 switch (chan->conn->type) {
4876 case ACL_LINK:
4877 cnt = hdev->acl_cnt;
4878 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004879 case AMP_LINK:
4880 cnt = hdev->block_cnt;
4881 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004882 case SCO_LINK:
4883 case ESCO_LINK:
4884 cnt = hdev->sco_cnt;
4885 break;
4886 case LE_LINK:
4887 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4888 break;
4889 default:
4890 cnt = 0;
4891 BT_ERR("Unknown link type");
4892 }
4893
4894 q = cnt / num;
4895 *quote = q ? q : 1;
4896 BT_DBG("chan %p quote %d", chan, *quote);
4897 return chan;
4898}
4899
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004900static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4901{
4902 struct hci_conn_hash *h = &hdev->conn_hash;
4903 struct hci_conn *conn;
4904 int num = 0;
4905
4906 BT_DBG("%s", hdev->name);
4907
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004908 rcu_read_lock();
4909
4910 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004911 struct hci_chan *chan;
4912
4913 if (conn->type != type)
4914 continue;
4915
4916 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4917 continue;
4918
4919 num++;
4920
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004921 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004922 struct sk_buff *skb;
4923
4924 if (chan->sent) {
4925 chan->sent = 0;
4926 continue;
4927 }
4928
4929 if (skb_queue_empty(&chan->data_q))
4930 continue;
4931
4932 skb = skb_peek(&chan->data_q);
4933 if (skb->priority >= HCI_PRIO_MAX - 1)
4934 continue;
4935
4936 skb->priority = HCI_PRIO_MAX - 1;
4937
4938 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004939 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004940 }
4941
4942 if (hci_conn_num(hdev, type) == num)
4943 break;
4944 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004945
4946 rcu_read_unlock();
4947
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004948}
4949
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004950static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4951{
4952 /* Calculate count of blocks used by this packet */
4953 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4954}
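
/* Example: assuming block_len == 339 (a typical AMP data block size),
 * an ACL packet with skb->len == 1025 carries 1021 payload bytes
 * after the 4-byte ACL header and therefore occupies
 * DIV_ROUND_UP(1021, 339) == 4 blocks of the controller's buffer
 * pool.
 */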
4955
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004956static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004958 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959		/* ACL tx timeout must be longer than the maximum
4960		 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004961 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004962 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004963 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004965}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004967static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004968{
4969 unsigned int cnt = hdev->acl_cnt;
4970 struct hci_chan *chan;
4971 struct sk_buff *skb;
4972 int quote;
4973
4974 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004975
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004976 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004977 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004978 u32 priority = (skb_peek(&chan->data_q))->priority;
4979 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004980 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004981 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004982
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004983 /* Stop if priority has changed */
4984 if (skb->priority < priority)
4985 break;
4986
4987 skb = skb_dequeue(&chan->data_q);
4988
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004989 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004990 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004991
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004992 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993 hdev->acl_last_tx = jiffies;
4994
4995 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004996 chan->sent++;
4997 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004998 }
4999 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005000
5001 if (cnt != hdev->acl_cnt)
5002 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003}
5004
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005005static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005006{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005007 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005008 struct hci_chan *chan;
5009 struct sk_buff *skb;
5010 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005011 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005012
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02005013 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005014
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005015 BT_DBG("%s", hdev->name);
5016
5017 if (hdev->dev_type == HCI_AMP)
5018 type = AMP_LINK;
5019 else
5020 type = ACL_LINK;
5021
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005022 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005023 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005024 u32 priority = (skb_peek(&chan->data_q))->priority;
5025 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5026 int blocks;
5027
5028 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005029 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005030
5031 /* Stop if priority has changed */
5032 if (skb->priority < priority)
5033 break;
5034
5035 skb = skb_dequeue(&chan->data_q);
5036
5037 blocks = __get_blocks(hdev, skb);
5038 if (blocks > hdev->block_cnt)
5039 return;
5040
5041 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005042 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005043
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005044 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005045 hdev->acl_last_tx = jiffies;
5046
5047 hdev->block_cnt -= blocks;
5048 quote -= blocks;
5049
5050 chan->sent += blocks;
5051 chan->conn->sent += blocks;
5052 }
5053 }
5054
5055 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005056 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005057}
5058
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005059static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005060{
5061 BT_DBG("%s", hdev->name);
5062
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005063 /* No ACL link over BR/EDR controller */
5064 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5065 return;
5066
5067 /* No AMP link over AMP controller */
5068 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005069 return;
5070
5071 switch (hdev->flow_ctl_mode) {
5072 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5073 hci_sched_acl_pkt(hdev);
5074 break;
5075
5076 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5077 hci_sched_acl_blk(hdev);
5078 break;
5079 }
5080}
5081
Linus Torvalds1da177e2005-04-16 15:20:36 -07005082/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005083static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084{
5085 struct hci_conn *conn;
5086 struct sk_buff *skb;
5087 int quote;
5088
5089 BT_DBG("%s", hdev->name);
5090
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005091 if (!hci_conn_num(hdev, SCO_LINK))
5092 return;
5093
Linus Torvalds1da177e2005-04-16 15:20:36 -07005094 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5095 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5096 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005097 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005098
5099 conn->sent++;
5100 if (conn->sent == ~0)
5101 conn->sent = 0;
5102 }
5103 }
5104}
5105
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005106static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005107{
5108 struct hci_conn *conn;
5109 struct sk_buff *skb;
5110 int quote;
5111
5112 BT_DBG("%s", hdev->name);
5113
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005114 if (!hci_conn_num(hdev, ESCO_LINK))
5115 return;
5116
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005117 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5118 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005119 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5120 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005121 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005122
5123 conn->sent++;
5124 if (conn->sent == ~0)
5125 conn->sent = 0;
5126 }
5127 }
5128}
5129
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005130static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005131{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005132 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005133 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005134 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005135
5136 BT_DBG("%s", hdev->name);
5137
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005138 if (!hci_conn_num(hdev, LE_LINK))
5139 return;
5140
Marcel Holtmann4a964402014-07-02 19:10:33 +02005141 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005142		/* LE tx timeout must be longer than the maximum
5143		 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005144 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005145 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005146 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005147 }
5148
5149 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005150 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005151 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005152 u32 priority = (skb_peek(&chan->data_q))->priority;
5153 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005154 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005155 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005156
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005157 /* Stop if priority has changed */
5158 if (skb->priority < priority)
5159 break;
5160
5161 skb = skb_dequeue(&chan->data_q);
5162
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005163 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005164 hdev->le_last_tx = jiffies;
5165
5166 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005167 chan->sent++;
5168 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005169 }
5170 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005171
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005172 if (hdev->le_pkts)
5173 hdev->le_cnt = cnt;
5174 else
5175 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005176
5177 if (cnt != tmp)
5178 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005179}
5180
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005181static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005183 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005184 struct sk_buff *skb;
5185
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005186 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005187 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005188
Marcel Holtmann52de5992013-09-03 18:08:38 -07005189 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5190 /* Schedule queues and send stuff to HCI driver */
5191 hci_sched_acl(hdev);
5192 hci_sched_sco(hdev);
5193 hci_sched_esco(hdev);
5194 hci_sched_le(hdev);
5195 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005196
Linus Torvalds1da177e2005-04-16 15:20:36 -07005197 /* Send next queued raw (unknown type) packet */
5198 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005199 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005200}
5201
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005202/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203
5204/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005205static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206{
5207 struct hci_acl_hdr *hdr = (void *) skb->data;
5208 struct hci_conn *conn;
5209 __u16 handle, flags;
5210
5211 skb_pull(skb, HCI_ACL_HDR_SIZE);
5212
5213 handle = __le16_to_cpu(hdr->handle);
5214 flags = hci_flags(handle);
5215 handle = hci_handle(handle);
5216
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005217 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005218 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005219
5220 hdev->stat.acl_rx++;
5221
5222 hci_dev_lock(hdev);
5223 conn = hci_conn_hash_lookup_handle(hdev, handle);
5224 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005225
Linus Torvalds1da177e2005-04-16 15:20:36 -07005226 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005227 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005228
Linus Torvalds1da177e2005-04-16 15:20:36 -07005229 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005230 l2cap_recv_acldata(conn, skb, flags);
5231 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005232 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005233 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005234 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005235 }
5236
5237 kfree_skb(skb);
5238}
5239
5240/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005241static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005242{
5243 struct hci_sco_hdr *hdr = (void *) skb->data;
5244 struct hci_conn *conn;
5245 __u16 handle;
5246
5247 skb_pull(skb, HCI_SCO_HDR_SIZE);
5248
5249 handle = __le16_to_cpu(hdr->handle);
5250
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005251 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252
5253 hdev->stat.sco_rx++;
5254
5255 hci_dev_lock(hdev);
5256 conn = hci_conn_hash_lookup_handle(hdev, handle);
5257 hci_dev_unlock(hdev);
5258
5259 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005260 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005261 sco_recv_scodata(conn, skb);
5262 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005263 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005264 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005265 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005266 }
5267
5268 kfree_skb(skb);
5269}
5270
Johan Hedberg9238f362013-03-05 20:37:48 +02005271static bool hci_req_is_complete(struct hci_dev *hdev)
5272{
5273 struct sk_buff *skb;
5274
5275 skb = skb_peek(&hdev->cmd_q);
5276 if (!skb)
5277 return true;
5278
5279 return bt_cb(skb)->req.start;
5280}
5281
Johan Hedberg42c6b122013-03-05 20:37:49 +02005282static void hci_resend_last(struct hci_dev *hdev)
5283{
5284 struct hci_command_hdr *sent;
5285 struct sk_buff *skb;
5286 u16 opcode;
5287
5288 if (!hdev->sent_cmd)
5289 return;
5290
5291 sent = (void *) hdev->sent_cmd->data;
5292 opcode = __le16_to_cpu(sent->opcode);
5293 if (opcode == HCI_OP_RESET)
5294 return;
5295
5296 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5297 if (!skb)
5298 return;
5299
5300 skb_queue_head(&hdev->cmd_q, skb);
5301 queue_work(hdev->workqueue, &hdev->cmd_work);
5302}
5303
Johan Hedberg9238f362013-03-05 20:37:48 +02005304void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5305{
5306 hci_req_complete_t req_complete = NULL;
5307 struct sk_buff *skb;
5308 unsigned long flags;
5309
5310 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5311
Johan Hedberg42c6b122013-03-05 20:37:49 +02005312 /* If the completed command doesn't match the last one that was
5313 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005314 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005315 if (!hci_sent_cmd_data(hdev, opcode)) {
5316 /* Some CSR based controllers generate a spontaneous
5317 * reset complete event during init and any pending
5318 * command will never be completed. In such a case we
5319 * need to resend whatever was the last sent
5320 * command.
5321 */
5322 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5323 hci_resend_last(hdev);
5324
Johan Hedberg9238f362013-03-05 20:37:48 +02005325 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005326 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005327
5328 /* If the command succeeded and there's still more commands in
5329 * this request the request is not yet complete.
5330 */
5331 if (!status && !hci_req_is_complete(hdev))
5332 return;
5333
5334 /* If this was the last command in a request the complete
5335 * callback would be found in hdev->sent_cmd instead of the
5336 * command queue (hdev->cmd_q).
5337 */
5338 if (hdev->sent_cmd) {
5339 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005340
5341 if (req_complete) {
5342 /* We must set the complete callback to NULL to
5343 * avoid calling the callback more than once if
5344 * this function gets called again.
5345 */
5346 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5347
Johan Hedberg9238f362013-03-05 20:37:48 +02005348 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005349 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005350 }
5351
5352 /* Remove all pending commands belonging to this request */
5353 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5354 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5355 if (bt_cb(skb)->req.start) {
5356 __skb_queue_head(&hdev->cmd_q, skb);
5357 break;
5358 }
5359
5360 req_complete = bt_cb(skb)->req.complete;
5361 kfree_skb(skb);
5362 }
5363 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5364
5365call_complete:
5366 if (req_complete)
5367 req_complete(hdev, status);
5368}
5369
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005370static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005371{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005372 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 struct sk_buff *skb;
5374
5375 BT_DBG("%s", hdev->name);
5376
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005378 /* Send copy to monitor */
5379 hci_send_to_monitor(hdev, skb);
5380
Linus Torvalds1da177e2005-04-16 15:20:36 -07005381 if (atomic_read(&hdev->promisc)) {
5382 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005383 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005384 }
5385
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005386 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005387 kfree_skb(skb);
5388 continue;
5389 }
5390
5391 if (test_bit(HCI_INIT, &hdev->flags)) {
5392			/* Don't process data packets in these states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005393 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394 case HCI_ACLDATA_PKT:
5395 case HCI_SCODATA_PKT:
5396 kfree_skb(skb);
5397 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399 }
5400
5401 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005402 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005403 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005404 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005405 hci_event_packet(hdev, skb);
5406 break;
5407
5408 case HCI_ACLDATA_PKT:
5409 BT_DBG("%s ACL data packet", hdev->name);
5410 hci_acldata_packet(hdev, skb);
5411 break;
5412
5413 case HCI_SCODATA_PKT:
5414 BT_DBG("%s SCO data packet", hdev->name);
5415 hci_scodata_packet(hdev, skb);
5416 break;
5417
5418 default:
5419 kfree_skb(skb);
5420 break;
5421 }
5422 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423}
5424
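/* Editor's illustrative sketch (not part of the original file): transport
 * drivers feed the rx_q drained above through hci_recv_frame(). The
 * function name and parameters here are hypothetical.
 */
static void example_driver_deliver(struct hci_dev *hdev, const void *data,
				   unsigned int len, u8 pkt_type)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = pkt_type;

	/* Queues the skb on hdev->rx_q and schedules hci_rx_work() */
	hci_recv_frame(hdev, skb);
}
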
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				cancel_delayed_work(&hdev->cmd_timer);
			else
				schedule_delayed_work(&hdev->cmd_timer,
						      HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

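/* Editor's illustrative sketch (not part of the original file): commands
 * enter the cmd_q consumed above via hci_send_cmd(), here queueing an
 * HCI_Reset with no parameters. The wrapper name is hypothetical.
 */
static void example_queue_reset(struct hci_dev *hdev)
{
	if (hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL) < 0)
		BT_ERR("%s failed to queue HCI_Reset", hdev->name);
}
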
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

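/* Editor's illustrative sketch (not part of the original file): a minimal
 * caller that disables LE scanning through the request framework. The
 * function name is hypothetical; a NULL complete callback is used since
 * the status is not needed here.
 */
static void example_stop_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add_le_scan_disable(&req);

	if (hci_req_run(&req, NULL))
		BT_ERR("%s failed to run LE scan disable request", hdev->name);
}
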
static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Now that all stale white list entries have been removed,
	 * walk through the list of pending connections and ensure
	 * that any new device gets programmed into the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then just
	 * abort and return a filter policy value that does not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

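/* Editor's note (not part of the original file): the return values above
 * map directly onto the Filter_Policy field of the HCI LE Set Scan
 * Parameters command: 0x00 accepts all advertising packets, 0x01 accepts
 * only packets from devices in the white list.
 */
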
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using an unresolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

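/* Editor's note (not part of the original file): le_scan_interval and
 * le_scan_window above are expressed in units of 0.625 ms, as defined
 * for the HCI LE Set Scan Parameters command. For example, a value of
 * 0x0060 (96 decimal) corresponds to 96 * 0.625 ms = 60 ms.
 */
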
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * the background scanning; otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}

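/* Editor's illustrative sketch (not part of the original file): since
 * hci_update_background_scan() requires hdev->lock, a typical call site
 * looks like the hypothetical wrapper below.
 */
static void example_trigger_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
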
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
{
	u8 scan;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
		scan |= SCAN_INQUIRY;

	if (req)
		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	else
		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
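
/* Editor's note (not part of the original file): SCAN_PAGE and
 * SCAN_INQUIRY correspond to the page scan and inquiry scan bits of the
 * Write_Scan_Enable command, so the function above can either batch the
 * command into an ongoing request or send it immediately:
 *
 *	hci_update_page_scan(hdev, &req);	// queued into req
 *	hci_update_page_scan(hdev, NULL);	// sent right away
 */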