/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
/* Work handlers for the RX, command and TX paths; defined later in
 * this file.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

/* States of a synchronous HCI request. */
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

/* Serialize synchronous request processing on a device. */
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
Linus Torvalds1da177e2005-04-16 15:20:36 -070066/* ---- HCI notifications ---- */
67
/* Forward a device event to the HCI socket layer so listening
 * sockets get notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
/* File operations for the "dut_mode" debugfs entry. */
static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Marcel Holtmann47219832013-10-17 17:24:15 -0700203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700211
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700218
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700219 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
277 struct list_head *p, *n;
278
279 hci_dev_lock(hdev);
280 list_for_each_safe(p, n, &hdev->link_keys) {
281 struct link_key *key = list_entry(p, struct link_key, list);
282 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
283 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
284 }
285 hci_dev_unlock(hdev);
286
287 return 0;
288}
289
290static int link_keys_open(struct inode *inode, struct file *file)
291{
292 return single_open(file, link_keys_show, inode->i_private);
293}
294
295static const struct file_operations link_keys_fops = {
296 .open = link_keys_open,
297 .read = seq_read,
298 .llseek = seq_lseek,
299 .release = single_release,
300};
301
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700302static int dev_class_show(struct seq_file *f, void *ptr)
303{
304 struct hci_dev *hdev = f->private;
305
306 hci_dev_lock(hdev);
307 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
308 hdev->dev_class[1], hdev->dev_class[0]);
309 hci_dev_unlock(hdev);
310
311 return 0;
312}
313
314static int dev_class_open(struct inode *inode, struct file *file)
315{
316 return single_open(file, dev_class_show, inode->i_private);
317}
318
319static const struct file_operations dev_class_fops = {
320 .open = dev_class_open,
321 .read = seq_read,
322 .llseek = seq_lseek,
323 .release = single_release,
324};
325
Marcel Holtmann041000b2013-10-17 12:02:31 -0700326static int voice_setting_get(void *data, u64 *val)
327{
328 struct hci_dev *hdev = data;
329
330 hci_dev_lock(hdev);
331 *val = hdev->voice_setting;
332 hci_dev_unlock(hdev);
333
334 return 0;
335}
336
337DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
338 NULL, "0x%4.4llx\n");
339
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700340static int auto_accept_delay_set(void *data, u64 val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 hdev->auto_accept_delay = val;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351static int auto_accept_delay_get(void *data, u64 *val)
352{
353 struct hci_dev *hdev = data;
354
355 hci_dev_lock(hdev);
356 *val = hdev->auto_accept_delay;
357 hci_dev_unlock(hdev);
358
359 return 0;
360}
361
362DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
363 auto_accept_delay_set, "%llu\n");
364
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800365static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
366 size_t count, loff_t *ppos)
367{
368 struct hci_dev *hdev = file->private_data;
369 char buf[3];
370
Marcel Holtmann111902f2014-06-21 04:53:17 +0200371 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800372 buf[1] = '\n';
373 buf[2] = '\0';
374 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
375}
376
377static ssize_t force_sc_support_write(struct file *file,
378 const char __user *user_buf,
379 size_t count, loff_t *ppos)
380{
381 struct hci_dev *hdev = file->private_data;
382 char buf[32];
383 size_t buf_size = min(count, (sizeof(buf)-1));
384 bool enable;
385
386 if (test_bit(HCI_UP, &hdev->flags))
387 return -EBUSY;
388
389 if (copy_from_user(buf, user_buf, buf_size))
390 return -EFAULT;
391
392 buf[buf_size] = '\0';
393 if (strtobool(buf, &enable))
394 return -EINVAL;
395
Marcel Holtmann111902f2014-06-21 04:53:17 +0200396 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800397 return -EALREADY;
398
Marcel Holtmann111902f2014-06-21 04:53:17 +0200399 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800400
401 return count;
402}
403
404static const struct file_operations force_sc_support_fops = {
405 .open = simple_open,
406 .read = force_sc_support_read,
407 .write = force_sc_support_write,
408 .llseek = default_llseek,
409};
410
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800411static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
412 size_t count, loff_t *ppos)
413{
414 struct hci_dev *hdev = file->private_data;
415 char buf[3];
416
417 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
418 buf[1] = '\n';
419 buf[2] = '\0';
420 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
421}
422
423static const struct file_operations sc_only_mode_fops = {
424 .open = simple_open,
425 .read = sc_only_mode_read,
426 .llseek = default_llseek,
427};
428
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700429static int idle_timeout_set(void *data, u64 val)
430{
431 struct hci_dev *hdev = data;
432
433 if (val != 0 && (val < 500 || val > 3600000))
434 return -EINVAL;
435
436 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700437 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700438 hci_dev_unlock(hdev);
439
440 return 0;
441}
442
443static int idle_timeout_get(void *data, u64 *val)
444{
445 struct hci_dev *hdev = data;
446
447 hci_dev_lock(hdev);
448 *val = hdev->idle_timeout;
449 hci_dev_unlock(hdev);
450
451 return 0;
452}
453
454DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
455 idle_timeout_set, "%llu\n");
456
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200457static int rpa_timeout_set(void *data, u64 val)
458{
459 struct hci_dev *hdev = data;
460
461 /* Require the RPA timeout to be at least 30 seconds and at most
462 * 24 hours.
463 */
464 if (val < 30 || val > (60 * 60 * 24))
465 return -EINVAL;
466
467 hci_dev_lock(hdev);
468 hdev->rpa_timeout = val;
469 hci_dev_unlock(hdev);
470
471 return 0;
472}
473
474static int rpa_timeout_get(void *data, u64 *val)
475{
476 struct hci_dev *hdev = data;
477
478 hci_dev_lock(hdev);
479 *val = hdev->rpa_timeout;
480 hci_dev_unlock(hdev);
481
482 return 0;
483}
484
485DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
486 rpa_timeout_set, "%llu\n");
487
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700488static int sniff_min_interval_set(void *data, u64 val)
489{
490 struct hci_dev *hdev = data;
491
492 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
493 return -EINVAL;
494
495 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700496 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700497 hci_dev_unlock(hdev);
498
499 return 0;
500}
501
502static int sniff_min_interval_get(void *data, u64 *val)
503{
504 struct hci_dev *hdev = data;
505
506 hci_dev_lock(hdev);
507 *val = hdev->sniff_min_interval;
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
514 sniff_min_interval_set, "%llu\n");
515
516static int sniff_max_interval_set(void *data, u64 val)
517{
518 struct hci_dev *hdev = data;
519
520 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
521 return -EINVAL;
522
523 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700524 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700525 hci_dev_unlock(hdev);
526
527 return 0;
528}
529
530static int sniff_max_interval_get(void *data, u64 *val)
531{
532 struct hci_dev *hdev = data;
533
534 hci_dev_lock(hdev);
535 *val = hdev->sniff_max_interval;
536 hci_dev_unlock(hdev);
537
538 return 0;
539}
540
541DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
542 sniff_max_interval_set, "%llu\n");
543
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200544static int conn_info_min_age_set(void *data, u64 val)
545{
546 struct hci_dev *hdev = data;
547
548 if (val == 0 || val > hdev->conn_info_max_age)
549 return -EINVAL;
550
551 hci_dev_lock(hdev);
552 hdev->conn_info_min_age = val;
553 hci_dev_unlock(hdev);
554
555 return 0;
556}
557
558static int conn_info_min_age_get(void *data, u64 *val)
559{
560 struct hci_dev *hdev = data;
561
562 hci_dev_lock(hdev);
563 *val = hdev->conn_info_min_age;
564 hci_dev_unlock(hdev);
565
566 return 0;
567}
568
569DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
570 conn_info_min_age_set, "%llu\n");
571
572static int conn_info_max_age_set(void *data, u64 val)
573{
574 struct hci_dev *hdev = data;
575
576 if (val == 0 || val < hdev->conn_info_min_age)
577 return -EINVAL;
578
579 hci_dev_lock(hdev);
580 hdev->conn_info_max_age = val;
581 hci_dev_unlock(hdev);
582
583 return 0;
584}
585
586static int conn_info_max_age_get(void *data, u64 *val)
587{
588 struct hci_dev *hdev = data;
589
590 hci_dev_lock(hdev);
591 *val = hdev->conn_info_max_age;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
598 conn_info_max_age_set, "%llu\n");
599
Marcel Holtmannac345812014-02-23 12:44:25 -0800600static int identity_show(struct seq_file *f, void *p)
601{
602 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200603 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800604 u8 addr_type;
605
606 hci_dev_lock(hdev);
607
Johan Hedberga1f4c312014-02-27 14:05:41 +0200608 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800609
Johan Hedberga1f4c312014-02-27 14:05:41 +0200610 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800611 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800612
613 hci_dev_unlock(hdev);
614
615 return 0;
616}
617
618static int identity_open(struct inode *inode, struct file *file)
619{
620 return single_open(file, identity_show, inode->i_private);
621}
622
623static const struct file_operations identity_fops = {
624 .open = identity_open,
625 .read = seq_read,
626 .llseek = seq_lseek,
627 .release = single_release,
628};
629
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800630static int random_address_show(struct seq_file *f, void *p)
631{
632 struct hci_dev *hdev = f->private;
633
634 hci_dev_lock(hdev);
635 seq_printf(f, "%pMR\n", &hdev->random_addr);
636 hci_dev_unlock(hdev);
637
638 return 0;
639}
640
641static int random_address_open(struct inode *inode, struct file *file)
642{
643 return single_open(file, random_address_show, inode->i_private);
644}
645
646static const struct file_operations random_address_fops = {
647 .open = random_address_open,
648 .read = seq_read,
649 .llseek = seq_lseek,
650 .release = single_release,
651};
652
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700653static int static_address_show(struct seq_file *f, void *p)
654{
655 struct hci_dev *hdev = f->private;
656
657 hci_dev_lock(hdev);
658 seq_printf(f, "%pMR\n", &hdev->static_addr);
659 hci_dev_unlock(hdev);
660
661 return 0;
662}
663
664static int static_address_open(struct inode *inode, struct file *file)
665{
666 return single_open(file, static_address_show, inode->i_private);
667}
668
669static const struct file_operations static_address_fops = {
670 .open = static_address_open,
671 .read = seq_read,
672 .llseek = seq_lseek,
673 .release = single_release,
674};
675
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800676static ssize_t force_static_address_read(struct file *file,
677 char __user *user_buf,
678 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700679{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800680 struct hci_dev *hdev = file->private_data;
681 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700682
Marcel Holtmann111902f2014-06-21 04:53:17 +0200683 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800684 buf[1] = '\n';
685 buf[2] = '\0';
686 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
687}
688
689static ssize_t force_static_address_write(struct file *file,
690 const char __user *user_buf,
691 size_t count, loff_t *ppos)
692{
693 struct hci_dev *hdev = file->private_data;
694 char buf[32];
695 size_t buf_size = min(count, (sizeof(buf)-1));
696 bool enable;
697
698 if (test_bit(HCI_UP, &hdev->flags))
699 return -EBUSY;
700
701 if (copy_from_user(buf, user_buf, buf_size))
702 return -EFAULT;
703
704 buf[buf_size] = '\0';
705 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700706 return -EINVAL;
707
Marcel Holtmann111902f2014-06-21 04:53:17 +0200708 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800709 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700710
Marcel Holtmann111902f2014-06-21 04:53:17 +0200711 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800712
713 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700714}
715
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800716static const struct file_operations force_static_address_fops = {
717 .open = simple_open,
718 .read = force_static_address_read,
719 .write = force_static_address_write,
720 .llseek = default_llseek,
721};
Marcel Holtmann92202182013-10-18 16:38:10 -0700722
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800723static int white_list_show(struct seq_file *f, void *ptr)
724{
725 struct hci_dev *hdev = f->private;
726 struct bdaddr_list *b;
727
728 hci_dev_lock(hdev);
729 list_for_each_entry(b, &hdev->le_white_list, list)
730 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
731 hci_dev_unlock(hdev);
732
733 return 0;
734}
735
736static int white_list_open(struct inode *inode, struct file *file)
737{
738 return single_open(file, white_list_show, inode->i_private);
739}
740
741static const struct file_operations white_list_fops = {
742 .open = white_list_open,
743 .read = seq_read,
744 .llseek = seq_lseek,
745 .release = single_release,
746};
747
Marcel Holtmann3698d702014-02-18 21:54:49 -0800748static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
749{
750 struct hci_dev *hdev = f->private;
751 struct list_head *p, *n;
752
753 hci_dev_lock(hdev);
754 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
755 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
756 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
757 &irk->bdaddr, irk->addr_type,
758 16, irk->val, &irk->rpa);
759 }
760 hci_dev_unlock(hdev);
761
762 return 0;
763}
764
765static int identity_resolving_keys_open(struct inode *inode, struct file *file)
766{
767 return single_open(file, identity_resolving_keys_show,
768 inode->i_private);
769}
770
771static const struct file_operations identity_resolving_keys_fops = {
772 .open = identity_resolving_keys_open,
773 .read = seq_read,
774 .llseek = seq_lseek,
775 .release = single_release,
776};
777
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700778static int long_term_keys_show(struct seq_file *f, void *ptr)
779{
780 struct hci_dev *hdev = f->private;
Johan Hedberg970d0f12014-11-13 14:37:47 +0200781 struct smp_ltk *ltk;
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700782
Johan Hedberg970d0f12014-11-13 14:37:47 +0200783 rcu_read_lock();
784 list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800785 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700786 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
787 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800788 __le64_to_cpu(ltk->rand), 16, ltk->val);
Johan Hedberg970d0f12014-11-13 14:37:47 +0200789 rcu_read_unlock();
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700790
791 return 0;
792}
793
794static int long_term_keys_open(struct inode *inode, struct file *file)
795{
796 return single_open(file, long_term_keys_show, inode->i_private);
797}
798
799static const struct file_operations long_term_keys_fops = {
800 .open = long_term_keys_open,
801 .read = seq_read,
802 .llseek = seq_lseek,
803 .release = single_release,
804};
805
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700806static int conn_min_interval_set(void *data, u64 val)
807{
808 struct hci_dev *hdev = data;
809
810 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
811 return -EINVAL;
812
813 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700814 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700815 hci_dev_unlock(hdev);
816
817 return 0;
818}
819
820static int conn_min_interval_get(void *data, u64 *val)
821{
822 struct hci_dev *hdev = data;
823
824 hci_dev_lock(hdev);
825 *val = hdev->le_conn_min_interval;
826 hci_dev_unlock(hdev);
827
828 return 0;
829}
830
831DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
832 conn_min_interval_set, "%llu\n");
833
834static int conn_max_interval_set(void *data, u64 val)
835{
836 struct hci_dev *hdev = data;
837
838 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
839 return -EINVAL;
840
841 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700842 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700843 hci_dev_unlock(hdev);
844
845 return 0;
846}
847
848static int conn_max_interval_get(void *data, u64 *val)
849{
850 struct hci_dev *hdev = data;
851
852 hci_dev_lock(hdev);
853 *val = hdev->le_conn_max_interval;
854 hci_dev_unlock(hdev);
855
856 return 0;
857}
858
859DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
860 conn_max_interval_set, "%llu\n");
861
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200862static int conn_latency_set(void *data, u64 val)
863{
864 struct hci_dev *hdev = data;
865
866 if (val > 0x01f3)
867 return -EINVAL;
868
869 hci_dev_lock(hdev);
870 hdev->le_conn_latency = val;
871 hci_dev_unlock(hdev);
872
873 return 0;
874}
875
876static int conn_latency_get(void *data, u64 *val)
877{
878 struct hci_dev *hdev = data;
879
880 hci_dev_lock(hdev);
881 *val = hdev->le_conn_latency;
882 hci_dev_unlock(hdev);
883
884 return 0;
885}
886
887DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
888 conn_latency_set, "%llu\n");
889
Marcel Holtmannf1649572014-06-30 12:34:38 +0200890static int supervision_timeout_set(void *data, u64 val)
891{
892 struct hci_dev *hdev = data;
893
894 if (val < 0x000a || val > 0x0c80)
895 return -EINVAL;
896
897 hci_dev_lock(hdev);
898 hdev->le_supv_timeout = val;
899 hci_dev_unlock(hdev);
900
901 return 0;
902}
903
904static int supervision_timeout_get(void *data, u64 *val)
905{
906 struct hci_dev *hdev = data;
907
908 hci_dev_lock(hdev);
909 *val = hdev->le_supv_timeout;
910 hci_dev_unlock(hdev);
911
912 return 0;
913}
914
915DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
916 supervision_timeout_set, "%llu\n");
917
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800918static int adv_channel_map_set(void *data, u64 val)
919{
920 struct hci_dev *hdev = data;
921
922 if (val < 0x01 || val > 0x07)
923 return -EINVAL;
924
925 hci_dev_lock(hdev);
926 hdev->le_adv_channel_map = val;
927 hci_dev_unlock(hdev);
928
929 return 0;
930}
931
932static int adv_channel_map_get(void *data, u64 *val)
933{
934 struct hci_dev *hdev = data;
935
936 hci_dev_lock(hdev);
937 *val = hdev->le_adv_channel_map;
938 hci_dev_unlock(hdev);
939
940 return 0;
941}
942
943DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
944 adv_channel_map_set, "%llu\n");
945
Georg Lukas729a1052014-07-26 13:59:58 +0200946static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200947{
Georg Lukas729a1052014-07-26 13:59:58 +0200948 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200949
Georg Lukas729a1052014-07-26 13:59:58 +0200950 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200951 return -EINVAL;
952
Andre Guedes7d474e02014-02-26 20:21:54 -0300953 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200954 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300955 hci_dev_unlock(hdev);
956
957 return 0;
958}
959
Georg Lukas729a1052014-07-26 13:59:58 +0200960static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300961{
Georg Lukas729a1052014-07-26 13:59:58 +0200962 struct hci_dev *hdev = data;
963
964 hci_dev_lock(hdev);
965 *val = hdev->le_adv_min_interval;
966 hci_dev_unlock(hdev);
967
968 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -0300969}
970
Georg Lukas729a1052014-07-26 13:59:58 +0200971DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
972 adv_min_interval_set, "%llu\n");
973
974static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300975{
Georg Lukas729a1052014-07-26 13:59:58 +0200976 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300977
Georg Lukas729a1052014-07-26 13:59:58 +0200978 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -0300979 return -EINVAL;
980
Georg Lukas729a1052014-07-26 13:59:58 +0200981 hci_dev_lock(hdev);
982 hdev->le_adv_max_interval = val;
983 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300984
Georg Lukas729a1052014-07-26 13:59:58 +0200985 return 0;
986}
Andre Guedes7d474e02014-02-26 20:21:54 -0300987
Georg Lukas729a1052014-07-26 13:59:58 +0200988static int adv_max_interval_get(void *data, u64 *val)
989{
990 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300991
Georg Lukas729a1052014-07-26 13:59:58 +0200992 hci_dev_lock(hdev);
993 *val = hdev->le_adv_max_interval;
994 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300995
Georg Lukas729a1052014-07-26 13:59:58 +0200996 return 0;
997}
Andre Guedes7d474e02014-02-26 20:21:54 -0300998
Georg Lukas729a1052014-07-26 13:59:58 +0200999DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1000 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001001
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001002static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001003{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001004 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001005 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001006 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001007
Andre Guedes7d474e02014-02-26 20:21:54 -03001008 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001009 list_for_each_entry(b, &hdev->whitelist, list)
1010 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001011 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001012 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001013 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001014 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001015 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001016
Andre Guedes7d474e02014-02-26 20:21:54 -03001017 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001018}
1019
/* debugfs open callback: bind device_list_show() to a seq_file with
 * the per-controller hci_dev as private data.
 */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1024
/* File operations for the read-only "device_list" debugfs entry. */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1031
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032/* ---- HCI requests ---- */
1033
Johan Hedberg42c6b122013-03-05 20:37:49 +02001034static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001036 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037
1038 if (hdev->req_status == HCI_REQ_PEND) {
1039 hdev->req_result = result;
1040 hdev->req_status = HCI_REQ_DONE;
1041 wake_up_interruptible(&hdev->req_wait_q);
1042 }
1043}
1044
1045static void hci_req_cancel(struct hci_dev *hdev, int err)
1046{
1047 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1048
1049 if (hdev->req_status == HCI_REQ_PEND) {
1050 hdev->req_result = err;
1051 hdev->req_status = HCI_REQ_CANCELED;
1052 wake_up_interruptible(&hdev->req_wait_q);
1053 }
1054}
1055
/* Take ownership of the last received HCI event (hdev->recv_evt) and
 * validate it against what the caller waited for.
 *
 * If @event is non-zero the skb is returned only when its event code
 * equals @event. Otherwise the skb must be a Command Complete event
 * whose opcode matches @opcode; both headers are pulled off before the
 * skb is handed back. On any mismatch or malformed event the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach the stashed event under the device lock so no one else
	 * can free or replace it while we examine it.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1110
/* Send a single HCI command and sleep until its result arrives.
 *
 * The calling thread waits interruptibly for up to @timeout jiffies for
 * hci_req_sync_complete() to fire. A non-zero @event makes the final
 * hci_get_cmd_complete() match that event instead of a Command Complete
 * for @opcode.
 *
 * Returns the response skb on success, or an ERR_PTR: -EINTR on signal,
 * -ETIMEDOUT on timeout, -ENODATA when no matching event was captured,
 * or a status-derived errno on controller failure.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Join the wait queue and change task state before running the
	 * request, so the completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset request bookkeeping for the next synchronous user */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1166
/* Convenience wrapper around __hci_cmd_sync_ev() for the common case
 * of waiting on a Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1173
/* Execute request and wait for completion.
 *
 * Builds an HCI request via @func, runs it, and sleeps interruptibly
 * for up to @timeout jiffies until hci_req_sync_complete() signals the
 * result. Caller must hold the request lock (see hci_req_sync()).
 *
 * Returns 0 on success, -EINTR on signal, -ETIMEDOUT on timeout, or a
 * negative errno derived from the controller status.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	/* Join the wait queue before running the request so the
	 * completion wake-up cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	/* Reset request bookkeeping for the next synchronous user */
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1239
Johan Hedberg01178cd2013-03-05 20:37:41 +02001240static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001241 void (*req)(struct hci_request *req,
1242 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001243 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244{
1245 int ret;
1246
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001247 if (!test_bit(HCI_UP, &hdev->flags))
1248 return -ENETDOWN;
1249
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 /* Serialize all requests */
1251 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001252 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 hci_req_unlock(hdev);
1254
1255 return ret;
1256}
1257
Johan Hedberg42c6b122013-03-05 20:37:49 +02001258static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001259{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001260 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261
1262 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001263 set_bit(HCI_RESET, &req->hdev->flags);
1264 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265}
1266
/* Stage-one init for BR/EDR controllers: select packet-based flow
 * control and read the basic controller identity.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1280
/* Stage-one init for AMP controllers: select block-based flow control
 * and read the AMP-specific controller information.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1306
Johan Hedberg42c6b122013-03-05 20:37:49 +02001307static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001308{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001309 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001310
1311 BT_DBG("%s %ld", hdev->name, opt);
1312
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001313 /* Reset */
1314 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001315 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +03001316
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001317 switch (hdev->dev_type) {
1318 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001319 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001320 break;
1321
1322 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +02001323 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001324 break;
1325
1326 default:
1327 BT_ERR("Unknown device type %d", hdev->dev_type);
1328 break;
1329 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +02001330}
1331
/* Stage-two init for BR/EDR-capable controllers: read buffer sizes,
 * identity strings and IAC configuration, clear event filters and set
 * the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1373
/* Stage-two init for LE-capable controllers: read LE buffer sizes,
 * features, supported states and white list info, and clear the white
 * list. LE-only controllers get HCI_LE_ENABLED set implicitly.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1397
1398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1399{
1400 if (lmp_ext_inq_capable(hdev))
1401 return 0x02;
1402
1403 if (lmp_inq_rssi_capable(hdev))
1404 return 0x01;
1405
1406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1407 hdev->lmp_subver == 0x0757)
1408 return 0x01;
1409
1410 if (hdev->manufacturer == 15) {
1411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1412 return 0x01;
1413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1414 return 0x01;
1415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1416 return 0x01;
1417 }
1418
1419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1420 hdev->lmp_subver == 0x1805)
1421 return 0x01;
1422
1423 return 0x00;
1424}
1425
Johan Hedberg42c6b122013-03-05 20:37:49 +02001426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001427{
1428 u8 mode;
1429
Johan Hedberg42c6b122013-03-05 20:37:49 +02001430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001431
Johan Hedberg42c6b122013-03-05 20:37:49 +02001432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001433}
1434
/* Build and queue the Set Event Mask command, enabling only the events
 * that match the controller's advertised capabilities. Pre-1.2
 * controllers are skipped entirely.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1511
/* Second HCI init stage: transport setup (BR/EDR and/or LE), supported
 * commands, SSP/EIR configuration, inquiry mode, extended features and
 * authentication enable, all gated on the controller's capabilities.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1573
Johan Hedberg42c6b122013-03-05 20:37:49 +02001574static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001575{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001576 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001577 struct hci_cp_write_def_link_policy cp;
1578 u16 link_policy = 0;
1579
1580 if (lmp_rswitch_capable(hdev))
1581 link_policy |= HCI_LP_RSWITCH;
1582 if (lmp_hold_capable(hdev))
1583 link_policy |= HCI_LP_HOLD;
1584 if (lmp_sniff_capable(hdev))
1585 link_policy |= HCI_LP_SNIFF;
1586 if (lmp_park_capable(hdev))
1587 link_policy |= HCI_LP_PARK;
1588
1589 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001590 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001591}
1592
/* Queue a Write LE Host Supported command reflecting the HCI_LE_ENABLED
 * flag, but only when the host-side setting differs from what the
 * controller currently reports. LE-only controllers are skipped since
 * they do not support explicit enablement.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only send the command when the setting actually changes */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1613
/* Build and queue the Set Event Mask Page 2 command, enabling the CSB
 * (Connectionless Slave Broadcast) and authenticated payload timeout
 * events the controller supports.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1645
/* Third HCI init stage: event mask, stored-link-key cleanup, link
 * policy, LE event mask / TX power / host support, and extended feature
 * pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10;	/* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20;	/* LE Remote Connection
						 * Parameter Request
						 */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1716
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001717static void hci_init4_req(struct hci_request *req, unsigned long opt)
1718{
1719 struct hci_dev *hdev = req->hdev;
1720
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001721 /* Set event mask page 2 if the HCI command for it is supported */
1722 if (hdev->commands[22] & 0x04)
1723 hci_set_event_mask_page_2(req);
1724
Marcel Holtmann109e3192014-07-23 19:24:56 +02001725 /* Read local codec list if the HCI command is supported */
1726 if (hdev->commands[29] & 0x20)
1727 hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);
1728
Marcel Holtmannf4fe73e2014-07-23 19:24:57 +02001729 /* Get MWS transport configuration if the HCI command is supported */
1730 if (hdev->commands[30] & 0x08)
1731 hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);
1732
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001733 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001734 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001735 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001736
1737 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001738 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001739 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001740 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1741 u8 support = 0x01;
1742 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1743 sizeof(support), &support);
1744 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001745}
1746
/* Run the staged controller initialization (init1..init4 request
 * sequences) and, during the initial HCI_SETUP phase only, create the
 * debugfs entries matching the controller's capabilities.
 *
 * Returns 0 on success or the negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR specific entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning knobs */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE specific entries, plus SMP registration */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1885
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001886static void hci_init0_req(struct hci_request *req, unsigned long opt)
1887{
1888 struct hci_dev *hdev = req->hdev;
1889
1890 BT_DBG("%s %ld", hdev->name, opt);
1891
1892 /* Reset */
1893 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1894 hci_reset_req(req, 0);
1895
1896 /* Read Local Version */
1897 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1898
1899 /* Read BD Address */
1900 if (hdev->set_bdaddr)
1901 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1902}
1903
1904static int __hci_unconf_init(struct hci_dev *hdev)
1905{
1906 int err;
1907
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001908 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1909 return 0;
1910
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001911 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1912 if (err < 0)
1913 return err;
1914
1915 return 0;
1916}
1917
Johan Hedberg42c6b122013-03-05 20:37:49 +02001918static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919{
1920 __u8 scan = opt;
1921
Johan Hedberg42c6b122013-03-05 20:37:49 +02001922 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
1924 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001925 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926}
1927
Johan Hedberg42c6b122013-03-05 20:37:49 +02001928static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929{
1930 __u8 auth = opt;
1931
Johan Hedberg42c6b122013-03-05 20:37:49 +02001932 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933
1934 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001935 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936}
1937
Johan Hedberg42c6b122013-03-05 20:37:49 +02001938static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939{
1940 __u8 encrypt = opt;
1941
Johan Hedberg42c6b122013-03-05 20:37:49 +02001942 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001944 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001945 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946}
1947
Johan Hedberg42c6b122013-03-05 20:37:49 +02001948static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001949{
1950 __le16 policy = cpu_to_le16(opt);
1951
Johan Hedberg42c6b122013-03-05 20:37:49 +02001952 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001953
1954 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001955 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001956}
1957
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001958/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 * Device is held on return. */
1960struct hci_dev *hci_dev_get(int index)
1961{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001962 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
1964 BT_DBG("%d", index);
1965
1966 if (index < 0)
1967 return NULL;
1968
1969 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001970 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 if (d->id == index) {
1972 hdev = hci_dev_hold(d);
1973 break;
1974 }
1975 }
1976 read_unlock(&hci_dev_list_lock);
1977 return hdev;
1978}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001981
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001982bool hci_discovery_active(struct hci_dev *hdev)
1983{
1984 struct discovery_state *discov = &hdev->discovery;
1985
Andre Guedes6fbe1952012-02-03 17:47:58 -03001986 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001987 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001988 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001989 return true;
1990
Andre Guedes6fbe1952012-02-03 17:47:58 -03001991 default:
1992 return false;
1993 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001994}
1995
Johan Hedbergff9ef572012-01-04 14:23:45 +02001996void hci_discovery_set_state(struct hci_dev *hdev, int state)
1997{
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03001998 int old_state = hdev->discovery.state;
1999
Johan Hedbergff9ef572012-01-04 14:23:45 +02002000 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2001
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002002 if (old_state == state)
Johan Hedbergff9ef572012-01-04 14:23:45 +02002003 return;
2004
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002005 hdev->discovery.state = state;
2006
Johan Hedbergff9ef572012-01-04 14:23:45 +02002007 switch (state) {
2008 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03002009 hci_update_background_scan(hdev);
2010
Johan Hedbergbb3e0a32014-07-07 13:24:58 +03002011 if (old_state != DISCOVERY_STARTING)
Andre Guedes7b99b652012-02-13 15:41:02 -03002012 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02002013 break;
2014 case DISCOVERY_STARTING:
2015 break;
Andre Guedes343f9352012-02-17 20:39:37 -03002016 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02002017 mgmt_discovering(hdev, 1);
2018 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002019 case DISCOVERY_RESOLVING:
2020 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02002021 case DISCOVERY_STOPPING:
2022 break;
2023 }
Johan Hedbergff9ef572012-01-04 14:23:45 +02002024}
2025
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002026void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027{
Johan Hedberg30883512012-01-04 14:16:21 +02002028 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002029 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030
Johan Hedberg561aafb2012-01-04 13:31:59 +02002031 list_for_each_entry_safe(p, n, &cache->all, all) {
2032 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002033 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002035
2036 INIT_LIST_HEAD(&cache->unknown);
2037 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038}
2039
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002040struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2041 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042{
Johan Hedberg30883512012-01-04 14:16:21 +02002043 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 struct inquiry_entry *e;
2045
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002046 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047
Johan Hedberg561aafb2012-01-04 13:31:59 +02002048 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002050 return e;
2051 }
2052
2053 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054}
2055
Johan Hedberg561aafb2012-01-04 13:31:59 +02002056struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002057 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002058{
Johan Hedberg30883512012-01-04 14:16:21 +02002059 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002060 struct inquiry_entry *e;
2061
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002062 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002063
2064 list_for_each_entry(e, &cache->unknown, list) {
2065 if (!bacmp(&e->data.bdaddr, bdaddr))
2066 return e;
2067 }
2068
2069 return NULL;
2070}
2071
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002072struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002073 bdaddr_t *bdaddr,
2074 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002075{
2076 struct discovery_state *cache = &hdev->discovery;
2077 struct inquiry_entry *e;
2078
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002079 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002080
2081 list_for_each_entry(e, &cache->resolve, list) {
2082 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2083 return e;
2084 if (!bacmp(&e->data.bdaddr, bdaddr))
2085 return e;
2086 }
2087
2088 return NULL;
2089}
2090
Johan Hedberga3d4e202012-01-09 00:53:02 +02002091void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002092 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002093{
2094 struct discovery_state *cache = &hdev->discovery;
2095 struct list_head *pos = &cache->resolve;
2096 struct inquiry_entry *p;
2097
2098 list_del(&ie->list);
2099
2100 list_for_each_entry(p, &cache->resolve, list) {
2101 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002102 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002103 break;
2104 pos = &p->list;
2105 }
2106
2107 list_add(&ie->list, pos);
2108}
2109
/* Insert or refresh an inquiry result in the discovery cache.
 *
 * Returns MGMT_DEV_FOUND_* flags describing the entry: legacy pairing
 * when SSP is absent, and confirm-name when the remote name is still
 * unknown (or the entry could not be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* An RSSI change while a name lookup is still needed
		 * re-sorts the entry in the resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		/* Allocation failure: still ask userspace to confirm */
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: drop the entry from the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2171
2172static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2173{
Johan Hedberg30883512012-01-04 14:16:21 +02002174 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 struct inquiry_info *info = (struct inquiry_info *) buf;
2176 struct inquiry_entry *e;
2177 int copied = 0;
2178
Johan Hedberg561aafb2012-01-04 13:31:59 +02002179 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002181
2182 if (copied >= num)
2183 break;
2184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 bacpy(&info->bdaddr, &data->bdaddr);
2186 info->pscan_rep_mode = data->pscan_rep_mode;
2187 info->pscan_period_mode = data->pscan_period_mode;
2188 info->pscan_mode = data->pscan_mode;
2189 memcpy(info->dev_class, data->dev_class, 3);
2190 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002191
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002193 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 }
2195
2196 BT_DBG("cache %p, copied %d", cache, copied);
2197 return copied;
2198}
2199
Johan Hedberg42c6b122013-03-05 20:37:49 +02002200static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
2202 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002203 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 struct hci_cp_inquiry cp;
2205
2206 BT_DBG("%s", hdev->name);
2207
2208 if (test_bit(HCI_INQUIRY, &hdev->flags))
2209 return;
2210
2211 /* Start Inquiry */
2212 memcpy(&cp.lap, &ir->lap, 3);
2213 cp.length = ir->length;
2214 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002215 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216}
2217
2218int hci_inquiry(void __user *arg)
2219{
2220 __u8 __user *ptr = arg;
2221 struct hci_inquiry_req ir;
2222 struct hci_dev *hdev;
2223 int err = 0, do_inquiry = 0, max_rsp;
2224 long timeo;
2225 __u8 *buf;
2226
2227 if (copy_from_user(&ir, ptr, sizeof(ir)))
2228 return -EFAULT;
2229
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002230 hdev = hci_dev_get(ir.dev_id);
2231 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 return -ENODEV;
2233
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002234 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2235 err = -EBUSY;
2236 goto done;
2237 }
2238
Marcel Holtmann4a964402014-07-02 19:10:33 +02002239 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002240 err = -EOPNOTSUPP;
2241 goto done;
2242 }
2243
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002244 if (hdev->dev_type != HCI_BREDR) {
2245 err = -EOPNOTSUPP;
2246 goto done;
2247 }
2248
Johan Hedberg56f87902013-10-02 13:43:13 +03002249 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2250 err = -EOPNOTSUPP;
2251 goto done;
2252 }
2253
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002254 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002255 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002256 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002257 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 do_inquiry = 1;
2259 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002260 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261
Marcel Holtmann04837f62006-07-03 10:02:33 +02002262 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002263
2264 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002265 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2266 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002267 if (err < 0)
2268 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002269
2270 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2271 * cleared). If it is interrupted by a signal, return -EINTR.
2272 */
NeilBrown74316202014-07-07 15:16:04 +10002273 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002274 TASK_INTERRUPTIBLE))
2275 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002276 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002278 /* for unlimited number of responses we will use buffer with
2279 * 255 entries
2280 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002281 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2282
2283 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2284 * copy it to the user space.
2285 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002286 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002287 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 err = -ENOMEM;
2289 goto done;
2290 }
2291
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002292 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002293 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002294 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
2296 BT_DBG("num_rsp %d", ir.num_rsp);
2297
2298 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2299 ptr += sizeof(ir);
2300 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002301 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002303 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 err = -EFAULT;
2305
2306 kfree(buf);
2307
2308done:
2309 hci_dev_put(hdev);
2310 return err;
2311}
2312
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002313static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 int ret = 0;
2316
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 BT_DBG("%s %p", hdev->name, hdev);
2318
2319 hci_req_lock(hdev);
2320
Johan Hovold94324962012-03-15 14:48:41 +01002321 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2322 ret = -ENODEV;
2323 goto done;
2324 }
2325
Marcel Holtmannd603b762014-07-06 12:11:14 +02002326 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2327 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002328 /* Check for rfkill but allow the HCI setup stage to
2329 * proceed (which in itself doesn't cause any RF activity).
2330 */
2331 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2332 ret = -ERFKILL;
2333 goto done;
2334 }
2335
2336 /* Check for valid public address or a configured static
2337 * random adddress, but let the HCI setup proceed to
2338 * be able to determine if there is a public address
2339 * or not.
2340 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002341 * In case of user channel usage, it is not important
2342 * if a public address or static random address is
2343 * available.
2344 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002345 * This check is only valid for BR/EDR controllers
2346 * since AMP controllers do not have an address.
2347 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002348 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2349 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002350 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2351 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2352 ret = -EADDRNOTAVAIL;
2353 goto done;
2354 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002355 }
2356
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357 if (test_bit(HCI_UP, &hdev->flags)) {
2358 ret = -EALREADY;
2359 goto done;
2360 }
2361
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 if (hdev->open(hdev)) {
2363 ret = -EIO;
2364 goto done;
2365 }
2366
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002367 atomic_set(&hdev->cmd_cnt, 1);
2368 set_bit(HCI_INIT, &hdev->flags);
2369
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002370 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2371 if (hdev->setup)
2372 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002373
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002374 /* The transport driver can set these quirks before
2375 * creating the HCI device or in its setup callback.
2376 *
2377 * In case any of them is set, the controller has to
2378 * start up as unconfigured.
2379 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002380 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2381 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002382 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002383
2384 /* For an unconfigured controller it is required to
2385 * read at least the version information provided by
2386 * the Read Local Version Information command.
2387 *
2388 * If the set_bdaddr driver callback is provided, then
2389 * also the original Bluetooth public device address
2390 * will be read using the Read BD Address command.
2391 */
2392 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2393 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002394 }
2395
Marcel Holtmann9713c172014-07-06 12:11:15 +02002396 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2397 /* If public address change is configured, ensure that
2398 * the address gets programmed. If the driver does not
2399 * support changing the public address, fail the power
2400 * on procedure.
2401 */
2402 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2403 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002404 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2405 else
2406 ret = -EADDRNOTAVAIL;
2407 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002408
2409 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002410 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002411 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002412 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002413 }
2414
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002415 clear_bit(HCI_INIT, &hdev->flags);
2416
Linus Torvalds1da177e2005-04-16 15:20:36 -07002417 if (!ret) {
2418 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002419 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 set_bit(HCI_UP, &hdev->flags);
2421 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002422 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002423 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002424 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002425 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002426 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002427 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002428 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002429 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002430 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002431 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002432 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002433 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002434 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002435 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
2437 skb_queue_purge(&hdev->cmd_q);
2438 skb_queue_purge(&hdev->rx_q);
2439
2440 if (hdev->flush)
2441 hdev->flush(hdev);
2442
2443 if (hdev->sent_cmd) {
2444 kfree_skb(hdev->sent_cmd);
2445 hdev->sent_cmd = NULL;
2446 }
2447
2448 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002449 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002450 }
2451
2452done:
2453 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 return ret;
2455}
2456
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002457/* ---- HCI ioctl helpers ---- */
2458
2459int hci_dev_open(__u16 dev)
2460{
2461 struct hci_dev *hdev;
2462 int err;
2463
2464 hdev = hci_dev_get(dev);
2465 if (!hdev)
2466 return -ENODEV;
2467
Marcel Holtmann4a964402014-07-02 19:10:33 +02002468 /* Devices that are marked as unconfigured can only be powered
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002469 * up as user channel. Trying to bring them up as normal devices
2470 * will result into a failure. Only user channel operation is
2471 * possible.
2472 *
2473 * When this function is called for a user channel, the flag
2474 * HCI_USER_CHANNEL will be set first before attempting to
2475 * open the device.
2476 */
Marcel Holtmann4a964402014-07-02 19:10:33 +02002477 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002478 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479 err = -EOPNOTSUPP;
2480 goto done;
2481 }
2482
Johan Hedberge1d08f42013-10-01 22:44:50 +03002483 /* We need to ensure that no other power on/off work is pending
2484 * before proceeding to call hci_dev_do_open. This is
2485 * particularly important if the setup procedure has not yet
2486 * completed.
2487 */
2488 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2489 cancel_delayed_work(&hdev->power_off);
2490
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002491 /* After this call it is guaranteed that the setup procedure
2492 * has finished. This means that error conditions like RFKILL
2493 * or no valid public or static random address apply.
2494 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002495 flush_workqueue(hdev->req_workqueue);
2496
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002497 /* For controllers not using the management interface and that
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002498 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002499 * so that pairing works for them. Once the management interface
2500 * is in use this bit will be cleared again and userspace has
2501 * to explicitly enable it.
2502 */
2503 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2504 !test_bit(HCI_MGMT, &hdev->dev_flags))
Johan Hedbergb6ae8452014-07-30 09:22:22 +03002505 set_bit(HCI_BONDABLE, &hdev->dev_flags);
Marcel Holtmann12aa4f02014-07-10 15:25:22 +02002506
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002507 err = hci_dev_do_open(hdev);
2508
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002509done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002510 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002511 return err;
2512}
2513
Johan Hedbergd7347f32014-07-04 12:37:23 +03002514/* This function requires the caller holds hdev->lock */
2515static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2516{
2517 struct hci_conn_params *p;
2518
Johan Hedbergf161dd42014-08-15 21:06:54 +03002519 list_for_each_entry(p, &hdev->le_conn_params, list) {
2520 if (p->conn) {
2521 hci_conn_drop(p->conn);
Johan Hedbergf8aaf9b2014-08-17 23:28:57 +03002522 hci_conn_put(p->conn);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002523 p->conn = NULL;
2524 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002525 list_del_init(&p->action);
Johan Hedbergf161dd42014-08-15 21:06:54 +03002526 }
Johan Hedbergd7347f32014-07-04 12:37:23 +03002527
2528 BT_DBG("All LE pending actions cleared");
2529}
2530
/* Common power-off path shared by the ioctl handler, the rfkill
 * callback and the deferred power-off work. Cancels and flushes all
 * pending work, drops connections and queued packets, optionally
 * issues an HCI Reset (HCI_QUIRK_RESET_ON_CLOSE) and finally calls
 * the driver's close() callback. Returns 0, also when the device was
 * already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* If the device was not up, only the command timer needs to be
	 * stopped before returning.
	 */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the related
	 * discoverability flags.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Drop cached remote devices, pending LE actions and all
	 * active connections under hdev->lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags, keeping only HCI_RAW */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt about the power change if this was not an
	 * automatic power-off and the controller is BR/EDR capable.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	/* Forget transient identity data */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2633
2634int hci_dev_close(__u16 dev)
2635{
2636 struct hci_dev *hdev;
2637 int err;
2638
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002639 hdev = hci_dev_get(dev);
2640 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002642
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002643 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2644 err = -EBUSY;
2645 goto done;
2646 }
2647
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002648 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2649 cancel_delayed_work(&hdev->power_off);
2650
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002652
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002653done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 hci_dev_put(hdev);
2655 return err;
2656}
2657
2658int hci_dev_reset(__u16 dev)
2659{
2660 struct hci_dev *hdev;
2661 int ret = 0;
2662
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002663 hdev = hci_dev_get(dev);
2664 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 return -ENODEV;
2666
2667 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002668
Marcel Holtmann808a0492013-08-26 20:57:58 -07002669 if (!test_bit(HCI_UP, &hdev->flags)) {
2670 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002672 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002674 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2675 ret = -EBUSY;
2676 goto done;
2677 }
2678
Marcel Holtmann4a964402014-07-02 19:10:33 +02002679 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002680 ret = -EOPNOTSUPP;
2681 goto done;
2682 }
2683
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 /* Drop queues */
2685 skb_queue_purge(&hdev->rx_q);
2686 skb_queue_purge(&hdev->cmd_q);
2687
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002688 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002689 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002691 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692
2693 if (hdev->flush)
2694 hdev->flush(hdev);
2695
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002696 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002697 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002699 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
2701done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 hci_req_unlock(hdev);
2703 hci_dev_put(hdev);
2704 return ret;
2705}
2706
2707int hci_dev_reset_stat(__u16 dev)
2708{
2709 struct hci_dev *hdev;
2710 int ret = 0;
2711
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002712 hdev = hci_dev_get(dev);
2713 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002714 return -ENODEV;
2715
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002716 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2717 ret = -EBUSY;
2718 goto done;
2719 }
2720
Marcel Holtmann4a964402014-07-02 19:10:33 +02002721 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002722 ret = -EOPNOTSUPP;
2723 goto done;
2724 }
2725
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2727
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002728done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 return ret;
2731}
2732
Johan Hedberg123abc02014-07-10 12:09:07 +03002733static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2734{
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002735 bool conn_changed, discov_changed;
Johan Hedberg123abc02014-07-10 12:09:07 +03002736
2737 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2738
2739 if ((scan & SCAN_PAGE))
2740 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2741 &hdev->dev_flags);
2742 else
2743 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2744 &hdev->dev_flags);
2745
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002746 if ((scan & SCAN_INQUIRY)) {
2747 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2748 &hdev->dev_flags);
2749 } else {
2750 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2751 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2752 &hdev->dev_flags);
2753 }
2754
Johan Hedberg123abc02014-07-10 12:09:07 +03002755 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2756 return;
2757
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002758 if (conn_changed || discov_changed) {
2759 /* In case this was disabled through mgmt */
2760 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2761
2762 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2763 mgmt_update_adv_data(hdev);
2764
Johan Hedberg123abc02014-07-10 12:09:07 +03002765 mgmt_new_settings(hdev);
Johan Hedbergbc6d2d02014-07-10 12:09:08 +03002766 }
Johan Hedberg123abc02014-07-10 12:09:07 +03002767}
2768
/* Handle the legacy HCI device-control ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN, HCISETLINKPOL, HCISETLINKMODE,
 * HCISETPTYPE, HCISETACLMTU, HCISETSCOMTU). Copies a struct
 * hci_dev_req from userspace and applies the requested setting.
 * Only available for powered BR/EDR controllers that are not owned
 * by a user channel and not unconfigured.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls, dev_opt packs two __u16 values: the
	 * second half-word is the MTU, the first the packet count.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2870
2871int hci_get_dev_list(void __user *arg)
2872{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002873 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 struct hci_dev_list_req *dl;
2875 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876 int n = 0, size, err;
2877 __u16 dev_num;
2878
2879 if (get_user(dev_num, (__u16 __user *) arg))
2880 return -EFAULT;
2881
2882 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2883 return -EINVAL;
2884
2885 size = sizeof(*dl) + dev_num * sizeof(*dr);
2886
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002887 dl = kzalloc(size, GFP_KERNEL);
2888 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002889 return -ENOMEM;
2890
2891 dr = dl->dev_req;
2892
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002893 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002894 list_for_each_entry(hdev, &hci_dev_list, list) {
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002895 unsigned long flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002896
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002897 /* When the auto-off is configured it means the transport
2898 * is running, but in that case still indicate that the
2899 * device is actually down.
2900 */
2901 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2902 flags &= ~BIT(HCI_UP);
Johan Hedbergc542a062011-01-26 13:11:03 +02002903
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 (dr + n)->dev_id = hdev->id;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002905 (dr + n)->dev_opt = flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002906
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 if (++n >= dev_num)
2908 break;
2909 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002910 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911
2912 dl->dev_num = n;
2913 size = sizeof(*dl) + n * sizeof(*dr);
2914
2915 err = copy_to_user(arg, dl, size);
2916 kfree(dl);
2917
2918 return err ? -EFAULT : 0;
2919}
2920
2921int hci_get_dev_info(void __user *arg)
2922{
2923 struct hci_dev *hdev;
2924 struct hci_dev_info di;
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002925 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002926 int err = 0;
2927
2928 if (copy_from_user(&di, arg, sizeof(di)))
2929 return -EFAULT;
2930
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002931 hdev = hci_dev_get(di.dev_id);
2932 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933 return -ENODEV;
2934
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002935 /* When the auto-off is configured it means the transport
2936 * is running, but in that case still indicate that the
2937 * device is actually down.
2938 */
2939 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2940 flags = hdev->flags & ~BIT(HCI_UP);
2941 else
2942 flags = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002943
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 strcpy(di.name, hdev->name);
2945 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002946 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Marcel Holtmann2e84d8d2014-07-10 13:17:37 +02002947 di.flags = flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002949 if (lmp_bredr_capable(hdev)) {
2950 di.acl_mtu = hdev->acl_mtu;
2951 di.acl_pkts = hdev->acl_pkts;
2952 di.sco_mtu = hdev->sco_mtu;
2953 di.sco_pkts = hdev->sco_pkts;
2954 } else {
2955 di.acl_mtu = hdev->le_mtu;
2956 di.acl_pkts = hdev->le_pkts;
2957 di.sco_mtu = 0;
2958 di.sco_pkts = 0;
2959 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002960 di.link_policy = hdev->link_policy;
2961 di.link_mode = hdev->link_mode;
2962
2963 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2964 memcpy(&di.features, &hdev->features, sizeof(di.features));
2965
2966 if (copy_to_user(arg, &di, sizeof(di)))
2967 err = -EFAULT;
2968
2969 hci_dev_put(hdev);
2970
2971 return err;
2972}
2973
2974/* ---- Interface to HCI drivers ---- */
2975
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002976static int hci_rfkill_set_block(void *data, bool blocked)
2977{
2978 struct hci_dev *hdev = data;
2979
2980 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2981
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002982 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2983 return -EBUSY;
2984
Johan Hedberg5e130362013-09-13 08:58:17 +03002985 if (blocked) {
2986 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002987 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2988 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002989 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002990 } else {
2991 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002992 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002993
2994 return 0;
2995}
2996
/* Only the block/unblock callback is needed for HCI controllers */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3000
/* Deferred power-on handler, scheduled on hdev->power_on. Opens the
 * device and then re-checks the error conditions that are ignored
 * during the setup phase (rfkill, unconfigured controller, missing
 * address); if any still applies the device is turned back off.
 * Also emits the appropriate mgmt index events when leaving the
 * setup or config phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices get switched off again after a
		 * timeout unless something keeps them up.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3059
3060static void hci_power_off(struct work_struct *work)
3061{
Johan Hedberg32435532011-11-07 22:16:04 +02003062 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003063 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003064
3065 BT_DBG("%s", hdev->name);
3066
Marcel Holtmann8ee56542012-02-21 12:33:48 +01003067 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003068}
3069
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003070static void hci_discov_off(struct work_struct *work)
3071{
3072 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003073
3074 hdev = container_of(work, struct hci_dev, discov_off.work);
3075
3076 BT_DBG("%s", hdev->name);
3077
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07003078 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02003079}
3080
Johan Hedberg35f74982014-02-18 17:14:32 +02003081void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003082{
Johan Hedberg48210022013-01-27 00:31:28 +02003083 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003084
Johan Hedberg48210022013-01-27 00:31:28 +02003085 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3086 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003087 kfree(uuid);
3088 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003089}
3090
Johan Hedberg35f74982014-02-18 17:14:32 +02003091void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003092{
3093 struct list_head *p, *n;
3094
3095 list_for_each_safe(p, n, &hdev->link_keys) {
3096 struct link_key *key;
3097
3098 key = list_entry(p, struct link_key, list);
3099
3100 list_del(p);
3101 kfree(key);
3102 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003103}
3104
/* Remove and free all stored LE long term keys.
 *
 * NOTE(review): entries are unlinked while iterating with the
 * non-_safe RCU helper; this relies on list_del_rcu() leaving the
 * removed entry's ->next pointer intact and on kfree_rcu() deferring
 * the actual free past a grace period. Presumably the caller
 * serializes against other writers via hdev->lock — confirm at call
 * sites.
 */
void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}
3114
Johan Hedberg970c4e42014-02-18 10:19:33 +02003115void hci_smp_irks_clear(struct hci_dev *hdev)
3116{
3117 struct smp_irk *k, *tmp;
3118
3119 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3120 list_del(&k->list);
3121 kfree(k);
3122 }
3123}
3124
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003125struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3126{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003127 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003128
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003129 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003130 if (bacmp(bdaddr, &k->bdaddr) == 0)
3131 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003132
3133 return NULL;
3134}
3135
/* Decide whether a new/changed link key should be stored persistently
 * (i.e. survive beyond the current connection).
 *
 * @conn may be NULL (security mode 3 link-level authentication).
 * @old_key_type is 0xff when no previous key was stored.  The checks
 * below are order-sensitive: type-based filters first, then the
 * bonding requirements negotiated on the connection.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
3171
Johan Hedberge804d252014-07-16 11:42:28 +03003172static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003173{
Johan Hedberge804d252014-07-16 11:42:28 +03003174 if (type == SMP_LTK)
3175 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003176
Johan Hedberge804d252014-07-16 11:42:28 +03003177 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003178}
3179
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003180struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003181 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003182{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003183 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003184
Johan Hedberg970d0f12014-11-13 14:37:47 +02003185 rcu_read_lock();
3186 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003187 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003188 continue;
3189
Johan Hedberge804d252014-07-16 11:42:28 +03003190 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003191 continue;
3192
Johan Hedberg970d0f12014-11-13 14:37:47 +02003193 rcu_read_unlock();
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003194 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003195 }
Johan Hedberg970d0f12014-11-13 14:37:47 +02003196 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003197
3198 return NULL;
3199}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003200
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003201struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003202 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003203{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003204 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003205
Johan Hedberg970d0f12014-11-13 14:37:47 +02003206 rcu_read_lock();
3207 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003208 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003209 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberg970d0f12014-11-13 14:37:47 +02003210 ltk_role(k->type) == role) {
3211 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003212 return k;
Johan Hedberg970d0f12014-11-13 14:37:47 +02003213 }
3214 }
3215 rcu_read_unlock();
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003216
3217 return NULL;
3218}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003219
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * Two passes over the IRK list: the first is a cheap comparison
 * against the last RPA each IRK resolved (cached in irk->rpa); only
 * if that misses is the cryptographic check smp_irk_matches() run,
 * and on success the cache is refreshed with the new RPA.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: this RPA was resolved before */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: run the crypto check against every known IRK */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
3238
3239struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3240 u8 addr_type)
3241{
3242 struct smp_irk *irk;
3243
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003244 /* Identity Address must be public or static random */
3245 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3246 return NULL;
3247
Johan Hedberg970c4e42014-02-18 10:19:33 +02003248 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3249 if (addr_type == irk->addr_type &&
3250 bacmp(bdaddr, &irk->bdaddr) == 0)
3251 return irk;
3252 }
3253
3254 return NULL;
3255}
3256
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (e.g. key delivered without a connection object).
 * On success returns the stored key entry; if @persistent is non-NULL
 * it is set to whether the key should be kept across reboots (see
 * hci_persistent_key()).  Returns NULL on allocation failure.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3303
/* Store (or update) an SMP Long Term Key for @bdaddr/@addr_type.
 *
 * A key is matched for update by address, address type and the role
 * derived from @type (see ltk_role()).  New entries are added with
 * list_add_rcu() so concurrent RCU readers of long_term_keys stay
 * safe.  Returns the stored entry, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
3332
Johan Hedbergca9142b2014-02-19 14:57:44 +02003333struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3334 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003335{
3336 struct smp_irk *irk;
3337
3338 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3339 if (!irk) {
3340 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3341 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003342 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003343
3344 bacpy(&irk->bdaddr, bdaddr);
3345 irk->addr_type = addr_type;
3346
3347 list_add(&irk->list, &hdev->identity_resolving_keys);
3348 }
3349
3350 memcpy(irk->val, val, 16);
3351 bacpy(&irk->rpa, rpa);
3352
Johan Hedbergca9142b2014-02-19 14:57:44 +02003353 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003354}
3355
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003356int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3357{
3358 struct link_key *key;
3359
3360 key = hci_find_link_key(hdev, bdaddr);
3361 if (!key)
3362 return -ENOENT;
3363
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003365
3366 list_del(&key->list);
3367 kfree(key);
3368
3369 return 0;
3370}
3371
Johan Hedberge0b2b272014-02-18 17:14:31 +02003372int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003373{
Johan Hedberg970d0f12014-11-13 14:37:47 +02003374 struct smp_ltk *k;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003375 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003376
Johan Hedberg970d0f12014-11-13 14:37:47 +02003377 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003378 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003379 continue;
3380
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003381 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003382
Johan Hedberg970d0f12014-11-13 14:37:47 +02003383 list_del_rcu(&k->list);
3384 kfree_rcu(k, rcu);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003385 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003386 }
3387
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003388 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003389}
3390
Johan Hedberga7ec7332014-02-18 17:14:35 +02003391void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3392{
3393 struct smp_irk *k, *tmp;
3394
Johan Hedberg668b7b12014-02-21 16:03:31 +02003395 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003396 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3397 continue;
3398
3399 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3400
3401 list_del(&k->list);
3402 kfree(k);
3403 }
3404}
3405
Ville Tervo6bd32322011-02-16 16:32:41 +02003406/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003407static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003408{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003409 struct hci_dev *hdev = container_of(work, struct hci_dev,
3410 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003411
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003412 if (hdev->sent_cmd) {
3413 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3414 u16 opcode = __le16_to_cpu(sent->opcode);
3415
3416 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3417 } else {
3418 BT_ERR("%s command tx timeout", hdev->name);
3419 }
3420
Ville Tervo6bd32322011-02-16 16:32:41 +02003421 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003422 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003423}
3424
Szymon Janc2763eda2011-03-22 13:12:22 +01003425struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003426 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003427{
3428 struct oob_data *data;
3429
3430 list_for_each_entry(data, &hdev->remote_oob_data, list)
3431 if (bacmp(bdaddr, &data->bdaddr) == 0)
3432 return data;
3433
3434 return NULL;
3435}
3436
3437int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3438{
3439 struct oob_data *data;
3440
3441 data = hci_find_remote_oob_data(hdev, bdaddr);
3442 if (!data)
3443 return -ENOENT;
3444
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003445 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003446
3447 list_del(&data->list);
3448 kfree(data);
3449
3450 return 0;
3451}
3452
Johan Hedberg35f74982014-02-18 17:14:32 +02003453void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003454{
3455 struct oob_data *data, *n;
3456
3457 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3458 list_del(&data->list);
3459 kfree(data);
3460 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003461}
3462
/* Store P-192 OOB pairing data (hash + randomizer) for @bdaddr.
 *
 * Any previously stored P-256 values for this address are cleared,
 * since this entry point only carries P-192 data.  Returns 0 on
 * success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		/* kmalloc (not kzalloc) is fine: every field is
		 * explicitly written below.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Invalidate any stale P-256 values */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3488
/* Store extended OOB pairing data (both P-192 and P-256 hash +
 * randomizer pairs) for @bdaddr.  Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		/* kmalloc (not kzalloc) is fine: every field is
		 * explicitly written below.
		 */
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3515
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003516struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003517 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003518{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003519 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003520
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003521 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003522 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003523 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003524 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003525
3526 return NULL;
3527}
3528
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003529void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003530{
3531 struct list_head *p, *n;
3532
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003533 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003534 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003535
3536 list_del(p);
3537 kfree(b);
3538 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003539}
3540
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003541int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003542{
3543 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003545 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003546 return -EBADF;
3547
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003548 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003549 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003550
Johan Hedberg27f70f32014-07-21 10:50:06 +03003551 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003552 if (!entry)
3553 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003554
3555 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003556 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003557
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003558 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003559
3560 return 0;
3561}
3562
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003563int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003564{
3565 struct bdaddr_list *entry;
3566
Johan Hedberg35f74982014-02-18 17:14:32 +02003567 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003568 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003569 return 0;
3570 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003571
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003572 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003573 if (!entry)
3574 return -ENOENT;
3575
3576 list_del(&entry->list);
3577 kfree(entry);
3578
3579 return 0;
3580}
3581
Andre Guedes15819a72014-02-03 13:56:18 -03003582/* This function requires the caller holds hdev->lock */
3583struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3584 bdaddr_t *addr, u8 addr_type)
3585{
3586 struct hci_conn_params *params;
3587
Johan Hedberg738f6182014-07-03 19:33:51 +03003588 /* The conn params list only contains identity addresses */
3589 if (!hci_is_identity_address(addr, addr_type))
3590 return NULL;
3591
Andre Guedes15819a72014-02-03 13:56:18 -03003592 list_for_each_entry(params, &hdev->le_conn_params, list) {
3593 if (bacmp(&params->addr, addr) == 0 &&
3594 params->addr_type == addr_type) {
3595 return params;
3596 }
3597 }
3598
3599 return NULL;
3600}
3601
Andre Guedescef952c2014-02-26 20:21:49 -03003602static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3603{
3604 struct hci_conn *conn;
3605
3606 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3607 if (!conn)
3608 return false;
3609
3610 if (conn->dst_type != type)
3611 return false;
3612
3613 if (conn->state != BT_CONNECTED)
3614 return false;
3615
3616 return true;
3617}
3618
Andre Guedes15819a72014-02-03 13:56:18 -03003619/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003620struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3621 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003622{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003623 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003624
Johan Hedberg738f6182014-07-03 19:33:51 +03003625 /* The list only contains identity addresses */
3626 if (!hci_is_identity_address(addr, addr_type))
3627 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003628
Johan Hedberg501f8822014-07-04 12:37:26 +03003629 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003630 if (bacmp(&param->addr, addr) == 0 &&
3631 param->addr_type == addr_type)
3632 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003633 }
3634
3635 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003636}
3637
/* This function requires the caller holds hdev->lock.
 *
 * Look up or create the connection parameter entry for @addr.
 * A newly created entry is initialized from the controller-wide LE
 * defaults with auto_connect disabled and an empty action list.
 * Returns NULL for a non-identity address or on allocation failure.
 */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Reuse an existing entry if there is one */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any action list yet; keeps list_del(&params->action) safe */
	INIT_LIST_HEAD(&params->action);

	/* Seed with the device-wide LE connection defaults */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3673
/* This function requires the caller holds hdev->lock.
 *
 * Set the auto-connect policy for @addr, creating the conn params
 * entry if needed.  The entry is moved between the pend_le_reports /
 * pend_le_conns action lists according to the new policy, and the
 * background scan is updated whenever the action membership changes.
 * Returns 0 on success or -EIO when the entry cannot be created.
 */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Detach from the current action list (if any) before moving */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if not already connected */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3714
/* Release a conn params entry: drop the connection references it may
 * pin, unlink it from its action list (pend_le_conns/pend_le_reports)
 * and from le_conn_params, then free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
3726
Andre Guedes15819a72014-02-03 13:56:18 -03003727/* This function requires the caller holds hdev->lock */
3728void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3729{
3730 struct hci_conn_params *params;
3731
3732 params = hci_conn_params_lookup(hdev, addr, addr_type);
3733 if (!params)
3734 return;
3735
Johan Hedbergf6c63242014-08-15 21:06:59 +03003736 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003737
Johan Hedberg95305ba2014-07-04 12:37:21 +03003738 hci_update_background_scan(hdev);
3739
Andre Guedes15819a72014-02-03 13:56:18 -03003740 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3741}
3742
3743/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003744void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003745{
3746 struct hci_conn_params *params, *tmp;
3747
3748 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003749 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3750 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003751 list_del(&params->list);
3752 kfree(params);
3753 }
3754
Johan Hedberg55af49a2014-07-02 17:37:26 +03003755 BT_DBG("All LE disabled connection parameters were removed");
3756}
3757
3758/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003759void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003760{
3761 struct hci_conn_params *params, *tmp;
3762
Johan Hedbergf6c63242014-08-15 21:06:59 +03003763 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3764 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003765
Johan Hedberga2f41a82014-07-04 12:37:19 +03003766 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003767
Andre Guedes15819a72014-02-03 13:56:18 -03003768 BT_DBG("All LE connection parameters were removed");
3769}
3770
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003771static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003772{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003773 if (status) {
3774 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003775
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003776 hci_dev_lock(hdev);
3777 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3778 hci_dev_unlock(hdev);
3779 return;
3780 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003781}
3782
/* Request callback run after LE scanning has been disabled.
 *
 * For a pure LE discovery the discovery state machine is simply
 * stopped.  For interleaved discovery the BR/EDR phase is started
 * next: a general inquiry (GIAC) request is built and submitted,
 * with inquiry_complete() as its callback.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery: we're done */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* Kick off the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the inquiry with a clean result cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3825
/* Delayed work item that turns off an ongoing LE scan.
 *
 * Builds a single-command HCI request disabling LE scanning; the rest of
 * the discovery bookkeeping happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3843
/* Append an HCI command to @req that programs @rpa as the controller's
 * LE random address, unless doing so right now would be unsafe.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		/* Mark the RPA expired so the next cycle generates and
		 * programs a fresh address.
		 */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3867
/* Choose the own address type for an upcoming LE operation and, when a
 * random address is needed, append the command programming it to @req.
 *
 * @req:             HCI request the address update is appended to
 * @require_privacy: when true and privacy is disabled, fall back to a
 *                   non-resolvable private address rather than exposing
 *                   the public/static identity address
 * @own_addr_type:   out parameter, set to ADDR_LE_DEV_RANDOM or
 *                   ADDR_LE_DEV_PUBLIC
 *
 * Returns 0 on success or a negative error if generating a new RPA
 * failed.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Keep the current RPA if it has not expired and is
		 * still the programmed random address.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Re-arm the RPA expiry timer (rpa_timeout is in seconds) */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3937
Johan Hedberga1f4c312014-02-27 14:05:41 +02003938/* Copy the Identity Address of the controller.
3939 *
3940 * If the controller has a public BD_ADDR, then by default use that one.
3941 * If this is a LE only controller without a public address, default to
3942 * the static random address.
3943 *
3944 * For debugging purposes it is possible to force controllers with a
3945 * public address to use the static random address instead.
3946 */
3947void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3948 u8 *bdaddr_type)
3949{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003950 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003951 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3952 bacpy(bdaddr, &hdev->static_addr);
3953 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3954 } else {
3955 bacpy(bdaddr, &hdev->bdaddr);
3956 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3957 }
3958}
3959
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and fills in protocol defaults, list heads,
 * work items and queues. Returns NULL on allocation failure. The caller
 * releases the device with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default controller parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* Default LE parameters (values in controller units; see the
	 * Bluetooth Core Specification for the unit of each field)
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending operations) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Work items driving RX/TX/command processing and power state */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4036
/* Free HCI device.
 *
 * Drops the device reference; the actual memory is released by the
 * driver-model release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4044
/* Register HCI device.
 *
 * Validates the mandatory driver callbacks, assigns an index, creates
 * the work queues, debugfs directory, sysfs device and rfkill hook, and
 * finally queues the initial power-on. Returns the assigned index on
 * success or a negative error; on failure all acquired resources are
 * released via the goto cleanup chain.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* Drivers must provide these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: the device still works
	 * without it.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4148
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * shuts it down, tears down mgmt/rfkill/sysfs/debugfs state, clears all
 * per-device lists and drops the registration reference. The teardown
 * order mirrors the registration order and must be preserved.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark as unregistering so concurrent work can bail out early */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Discard any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device completed setup
	 * and is not in the middle of init/config.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Release all per-device key/filter/parameter lists */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4216
4217/* Suspend HCI device */
/* Suspend HCI device: notify listeners of the suspend event.
 * Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4224
4225/* Resume HCI device */
/* Resume HCI device: notify listeners of the resume event.
 * Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4232
Marcel Holtmann75e05692014-11-02 08:15:38 +01004233/* Reset HCI device */
4234int hci_reset_dev(struct hci_dev *hdev)
4235{
4236 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4237 struct sk_buff *skb;
4238
4239 skb = bt_skb_alloc(3, GFP_ATOMIC);
4240 if (!skb)
4241 return -ENOMEM;
4242
4243 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4244 memcpy(skb_put(skb, 3), hw_err, 3);
4245
4246 /* Send Hardware Error to upper stack */
4247 return hci_recv_frame(hdev, skb);
4248}
4249EXPORT_SYMBOL(hci_reset_dev);
4250
Marcel Holtmann76bca882009-11-18 00:40:39 +01004251/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004252int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004253{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004254 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004255 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004256 kfree_skb(skb);
4257 return -ENXIO;
4258 }
4259
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004260 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004261 bt_cb(skb)->incoming = 1;
4262
4263 /* Time stamp */
4264 __net_timestamp(skb);
4265
Marcel Holtmann76bca882009-11-18 00:40:39 +01004266 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004267 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004268
Marcel Holtmann76bca882009-11-18 00:40:39 +01004269 return 0;
4270}
4271EXPORT_SYMBOL(hci_recv_frame);
4272
/* Reassemble a fragmented HCI packet into hdev->reassembly[index].
 *
 * @type:  packet type (ACL, SCO or event)
 * @data:  fragment payload
 * @count: number of bytes available in @data
 * @index: reassembly slot to use
 *
 * Complete frames are handed to hci_recv_frame(). Returns the number of
 * input bytes left unconsumed, -EILSEQ for an invalid type or index, or
 * -ENOMEM on allocation failure / oversized payload (the partial frame
 * is discarded in that case).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the maximum-size skb
		 * for this packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the complete header is present, read the payload
		 * length from it and verify it fits the allocated skb.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4380
Suraj Sumangala99811512010-07-14 13:02:19 +05304381#define STREAM_REASSEMBLY 0
4382
/* Feed a raw byte stream into the stream reassembly slot.
 *
 * The stream format is a one-byte packet type indicator followed by the
 * packet itself; the indicator is only consumed when no frame is in
 * progress. Returns the number of unconsumed bytes (>= 0) or a negative
 * error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the bytes hci_reassembly() consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4415
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416/* ---- Interface to upper protocols ---- */
4417
/* Register an upper-protocol callback set with the HCI core.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4429
/* Remove a previously registered upper-protocol callback set.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4441
/* Hand an outgoing frame to the driver.
 *
 * Time stamps the skb, mirrors it to the monitor channel (and, in
 * promiscuous mode, to the HCI sockets) and then passes it to the
 * driver's send callback. The skb is consumed either way: on driver
 * failure it is freed here.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	int err;

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	err = hdev->send(hdev, skb);
	if (err < 0) {
		BT_ERR("%s sending frame failed (%d)", hdev->name, err);
		kfree_skb(skb);
	}
}
4468
/* Initialize an HCI request: empty command queue, no recorded error */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
4475
/* Submit all commands queued on @req to the controller.
 *
 * @complete is attached to the last command and runs when the whole
 * request has finished. Returns 0 on success, the error recorded while
 * building the request, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last queued command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice the request onto the device command queue atomically */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4507
Marcel Holtmann899de762014-07-11 05:51:58 +02004508bool hci_req_pending(struct hci_dev *hdev)
4509{
4510 return (hdev->req_status == HCI_REQ_PEND);
4511}
4512
/* Allocate and fill an skb carrying a single HCI command packet
 * (header plus @plen parameter bytes). Returns NULL on allocation
 * failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	/* Keep the host-order opcode in the control block for later
	 * matching against completion events.
	 */
	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	bt_cb(skb)->opcode = opcode;

	return skb;
}
4538
/* Send HCI command.
 *
 * Builds a stand-alone (single-command request) HCI command and queues
 * it for transmission. Returns 0 on success or -ENOMEM if the command
 * skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004563
/* Queue a command to an asynchronous HCI request.
 *
 * @event: non-zero to associate a specific HCI event with this command
 *         (NOTE(review): presumably the event the completion logic
 *         waits for instead of the default Command Complete/Status —
 *         confirm against the event handling code).
 *
 * Allocation failures are recorded in req->err and later reported by
 * hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
4594
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004595void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4596 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004597{
4598 hci_req_add_ev(req, opcode, plen, param, 0);
4599}
4600
Linus Torvalds1da177e2005-04-16 15:20:36 -07004601/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004602void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004603{
4604 struct hci_command_hdr *hdr;
4605
4606 if (!hdev->sent_cmd)
4607 return NULL;
4608
4609 hdr = (void *) hdev->sent_cmd->data;
4610
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004611 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004612 return NULL;
4613
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004614 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615
4616 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4617}
4618
4619/* Send ACL data */
4620static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4621{
4622 struct hci_acl_hdr *hdr;
4623 int len = skb->len;
4624
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004625 skb_push(skb, HCI_ACL_HDR_SIZE);
4626 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004627 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004628 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4629 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630}
4631
/* Attach ACL headers to an outgoing skb (and to every fragment on its
 * frag_list) and append all resulting packets to @queue.
 *
 * The head packet keeps the caller's @flags; each fragment is re-flagged
 * as a continuation (ACL_START cleared, ACL_CONT set).  When fragments
 * exist, the whole burst is queued under the queue lock so it appears
 * atomically to the TX path.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the linear part belongs to the head packet; the rest is
	 * carried separately via the frag_list handled below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* The header handle differs by controller type: connection
	 * handle for BR/EDR, logical-channel handle for AMP.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on AMP where the head fragment
			 * used chan->handle — confirm this is intended.
			 */
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4693
4694void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4695{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004696 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004697
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004698 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004699
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004700 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004702 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004703}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704
4705/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004706void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004707{
4708 struct hci_dev *hdev = conn->hdev;
4709 struct hci_sco_hdr hdr;
4710
4711 BT_DBG("%s len %d", hdev->name, skb->len);
4712
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004713 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004714 hdr.dlen = skb->len;
4715
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004716 skb_push(skb, HCI_SCO_HDR_SIZE);
4717 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004718 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004720 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004721
Linus Torvalds1da177e2005-04-16 15:20:36 -07004722 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004723 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004724}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725
4726/* ---- HCI TX task (outgoing data) ---- */
4727
/* HCI Connection scheduler.
 *
 * Pick the connection of link type @type that has data queued and the
 * fewest packets in flight (lowest ->sent), and compute its fair share
 * of the controller's free credits into *quote.  *quote is set to 0
 * when no eligible connection exists.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong link types and connections with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credit pool depends on the link type; LE falls back to
		 * ACL credits when no dedicated LE buffers exist.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Even split across contenders, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
4788
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004789static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004790{
4791 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004792 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004793
Ville Tervobae1f5d92011-02-10 22:38:53 -03004794 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004795
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004796 rcu_read_lock();
4797
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004799 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004800 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004801 BT_ERR("%s killing stalled connection %pMR",
4802 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004803 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004804 }
4805 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004806
4807 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004808}
4809
/* Channel-level scheduler.
 *
 * Select the best HCI channel of link type @type to transmit from:
 * channels are compared first by the priority of their head packet
 * (higher wins; a higher priority resets the candidate set), then by
 * the owning connection's in-flight count (fewest wins).  On success,
 * *quote receives the channel's fair share of free credits.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the contest at
			 * this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest packets in flight.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the credit pool matching the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even split across contenders, at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4891
/* Anti-starvation pass after a scheduling round.
 *
 * For every connection of link type @type: channels that transmitted
 * this round get their per-round counter reset, while channels that
 * were left waiting with queued data have their head packet promoted
 * to HCI_PRIO_MAX - 1 so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send — reset its counter
			 * and leave its priorities untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion level */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4941
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004942static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4943{
4944 /* Calculate count of blocks used by this packet */
4945 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4946}
4947
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004948static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004949{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004950 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004951 /* ACL tx timeout must be longer than maximum
4952 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004953 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004954 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004955 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004956 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004957}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004958
/* Packet-based ACL scheduler: send queued packets while credits last,
 * honouring per-channel quotas from hci_chan_sent().  The initial
 * credit count is snapshotted so a post-round priority recalculation
 * only runs if something was actually transmitted.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek confirmed the packet; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent — give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4996
/* Block-based ACL scheduler (flow control counted in data blocks
 * rather than packets).  AMP controllers schedule AMP links here,
 * BR/EDR controllers schedule ACL links.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* NOTE(review): returning here skips the priority
			 * recalculation below and drops the dequeued skb
			 * reference from the queue — confirm intended.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One packet may consume several blocks of both
			 * the controller budget and the channel quota.
			 */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent — give starved channels a boost */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
5050
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005051static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005052{
5053 BT_DBG("%s", hdev->name);
5054
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005055 /* No ACL link over BR/EDR controller */
5056 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5057 return;
5058
5059 /* No AMP link over AMP controller */
5060 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005061 return;
5062
5063 switch (hdev->flow_ctl_mode) {
5064 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5065 hci_sched_acl_pkt(hdev);
5066 break;
5067
5068 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5069 hci_sched_acl_blk(hdev);
5070 break;
5071 }
5072}
5073
Linus Torvalds1da177e2005-04-16 15:20:36 -07005074/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005075static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005076{
5077 struct hci_conn *conn;
5078 struct sk_buff *skb;
5079 int quote;
5080
5081 BT_DBG("%s", hdev->name);
5082
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005083 if (!hci_conn_num(hdev, SCO_LINK))
5084 return;
5085
Linus Torvalds1da177e2005-04-16 15:20:36 -07005086 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5087 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5088 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005089 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005090
5091 conn->sent++;
5092 if (conn->sent == ~0)
5093 conn->sent = 0;
5094 }
5095 }
5096}
5097
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005098static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005099{
5100 struct hci_conn *conn;
5101 struct sk_buff *skb;
5102 int quote;
5103
5104 BT_DBG("%s", hdev->name);
5105
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005106 if (!hci_conn_num(hdev, ESCO_LINK))
5107 return;
5108
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005109 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5110 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005111 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5112 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005113 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005114
5115 conn->sent++;
5116 if (conn->sent == ~0)
5117 conn->sent = 0;
5118 }
5119 }
5120}
5121
/* LE scheduler.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow ACL credits; the final credit count is
 * written back to whichever pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool when there are no LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent — give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
5172
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005173static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02005175 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005176 struct sk_buff *skb;
5177
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005178 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005179 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
Marcel Holtmann52de5992013-09-03 18:08:38 -07005181 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5182 /* Schedule queues and send stuff to HCI driver */
5183 hci_sched_acl(hdev);
5184 hci_sched_sco(hdev);
5185 hci_sched_esco(hdev);
5186 hci_sched_le(hdev);
5187 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005188
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189 /* Send next queued raw (unknown type) packet */
5190 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005191 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192}
5193
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005194/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195
5196/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005197static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005198{
5199 struct hci_acl_hdr *hdr = (void *) skb->data;
5200 struct hci_conn *conn;
5201 __u16 handle, flags;
5202
5203 skb_pull(skb, HCI_ACL_HDR_SIZE);
5204
5205 handle = __le16_to_cpu(hdr->handle);
5206 flags = hci_flags(handle);
5207 handle = hci_handle(handle);
5208
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005209 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005210 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005211
5212 hdev->stat.acl_rx++;
5213
5214 hci_dev_lock(hdev);
5215 conn = hci_conn_hash_lookup_handle(hdev, handle);
5216 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005217
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005219 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005220
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005222 l2cap_recv_acldata(conn, skb, flags);
5223 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005224 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005225 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005226 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005227 }
5228
5229 kfree_skb(skb);
5230}
5231
5232/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005233static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005234{
5235 struct hci_sco_hdr *hdr = (void *) skb->data;
5236 struct hci_conn *conn;
5237 __u16 handle;
5238
5239 skb_pull(skb, HCI_SCO_HDR_SIZE);
5240
5241 handle = __le16_to_cpu(hdr->handle);
5242
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005243 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005244
5245 hdev->stat.sco_rx++;
5246
5247 hci_dev_lock(hdev);
5248 conn = hci_conn_hash_lookup_handle(hdev, handle);
5249 hci_dev_unlock(hdev);
5250
5251 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005253 sco_recv_scodata(conn, skb);
5254 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005255 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005256 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005257 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005258 }
5259
5260 kfree_skb(skb);
5261}
5262
Johan Hedberg9238f362013-03-05 20:37:48 +02005263static bool hci_req_is_complete(struct hci_dev *hdev)
5264{
5265 struct sk_buff *skb;
5266
5267 skb = skb_peek(&hdev->cmd_q);
5268 if (!skb)
5269 return true;
5270
5271 return bt_cb(skb)->req.start;
5272}
5273
Johan Hedberg42c6b122013-03-05 20:37:49 +02005274static void hci_resend_last(struct hci_dev *hdev)
5275{
5276 struct hci_command_hdr *sent;
5277 struct sk_buff *skb;
5278 u16 opcode;
5279
5280 if (!hdev->sent_cmd)
5281 return;
5282
5283 sent = (void *) hdev->sent_cmd->data;
5284 opcode = __le16_to_cpu(sent->opcode);
5285 if (opcode == HCI_OP_RESET)
5286 return;
5287
5288 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5289 if (!skb)
5290 return;
5291
5292 skb_queue_head(&hdev->cmd_q, skb);
5293 queue_work(hdev->workqueue, &hdev->cmd_work);
5294}
5295
/* Handle completion of command @opcode with @status within the request
 * framework: decide whether the owning request is finished, flush the
 * request's remaining queued commands on failure, and invoke the
 * request-complete callback at most once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the start of the next request and put its
		 * first command back on the queue.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the last discarded command's callback so the
		 * request owner is still notified of the failure.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5361
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005362static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005364 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005365 struct sk_buff *skb;
5366
5367 BT_DBG("%s", hdev->name);
5368
Linus Torvalds1da177e2005-04-16 15:20:36 -07005369 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005370 /* Send copy to monitor */
5371 hci_send_to_monitor(hdev, skb);
5372
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 if (atomic_read(&hdev->promisc)) {
5374 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005375 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005376 }
5377
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005378 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005379 kfree_skb(skb);
5380 continue;
5381 }
5382
5383 if (test_bit(HCI_INIT, &hdev->flags)) {
5384 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005385 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386 case HCI_ACLDATA_PKT:
5387 case HCI_SCODATA_PKT:
5388 kfree_skb(skb);
5389 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005390 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 }
5392
5393 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005394 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005396 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005397 hci_event_packet(hdev, skb);
5398 break;
5399
5400 case HCI_ACLDATA_PKT:
5401 BT_DBG("%s ACL data packet", hdev->name);
5402 hci_acldata_packet(hdev, skb);
5403 break;
5404
5405 case HCI_SCODATA_PKT:
5406 BT_DBG("%s SCO data packet", hdev->name);
5407 hci_scodata_packet(hdev, skb);
5408 break;
5409
5410 default:
5411 kfree_skb(skb);
5412 break;
5413 }
5414 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005415}
5416
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005417static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005419 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005420 struct sk_buff *skb;
5421
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005422 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5423 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005424
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005426 if (atomic_read(&hdev->cmd_cnt)) {
5427 skb = skb_dequeue(&hdev->cmd_q);
5428 if (!skb)
5429 return;
5430
Wei Yongjun7585b972009-02-25 18:29:52 +08005431 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005432
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005433 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005434 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005435 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005436 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005437 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005438 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005439 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005440 schedule_delayed_work(&hdev->cmd_timer,
5441 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005442 } else {
5443 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005444 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005445 }
5446 }
5447}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005448
5449void hci_req_add_le_scan_disable(struct hci_request *req)
5450{
5451 struct hci_cp_le_set_scan_enable cp;
5452
5453 memset(&cp, 0, sizeof(cp));
5454 cp.enable = LE_SCAN_DISABLE;
5455 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5456}
Andre Guedesa4790db2014-02-26 20:21:47 -03005457
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005458static void add_to_white_list(struct hci_request *req,
5459 struct hci_conn_params *params)
5460{
5461 struct hci_cp_le_add_to_white_list cp;
5462
5463 cp.bdaddr_type = params->addr_type;
5464 bacpy(&cp.bdaddr, &params->addr);
5465
5466 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5467}
5468
5469static u8 update_white_list(struct hci_request *req)
5470{
5471 struct hci_dev *hdev = req->hdev;
5472 struct hci_conn_params *params;
5473 struct bdaddr_list *b;
5474 uint8_t white_list_entries = 0;
5475
5476 /* Go through the current white list programmed into the
5477 * controller one by one and check if that address is still
5478 * in the list of pending connections or list of devices to
5479 * report. If not present in either list, then queue the
5480 * command to remove it from the controller.
5481 */
5482 list_for_each_entry(b, &hdev->le_white_list, list) {
5483 struct hci_cp_le_del_from_white_list cp;
5484
5485 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5486 &b->bdaddr, b->bdaddr_type) ||
5487 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5488 &b->bdaddr, b->bdaddr_type)) {
5489 white_list_entries++;
5490 continue;
5491 }
5492
5493 cp.bdaddr_type = b->bdaddr_type;
5494 bacpy(&cp.bdaddr, &b->bdaddr);
5495
5496 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5497 sizeof(cp), &cp);
5498 }
5499
5500 /* Since all no longer valid white list entries have been
5501 * removed, walk through the list of pending connections
5502 * and ensure that any new device gets programmed into
5503 * the controller.
5504 *
5505 * If the list of the devices is larger than the list of
5506 * available white list entries in the controller, then
5507 * just abort and return filer policy value to not use the
5508 * white list.
5509 */
5510 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5511 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5512 &params->addr, params->addr_type))
5513 continue;
5514
5515 if (white_list_entries >= hdev->le_white_list_size) {
5516 /* Select filter policy to accept all advertising */
5517 return 0x00;
5518 }
5519
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005520 if (hci_find_irk_by_addr(hdev, &params->addr,
5521 params->addr_type)) {
5522 /* White list can not be used with RPAs */
5523 return 0x00;
5524 }
5525
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005526 white_list_entries++;
5527 add_to_white_list(req, params);
5528 }
5529
5530 /* After adding all new pending connections, walk through
5531 * the list of pending reports and also add these to the
5532 * white list if there is still space.
5533 */
5534 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5535 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5536 &params->addr, params->addr_type))
5537 continue;
5538
5539 if (white_list_entries >= hdev->le_white_list_size) {
5540 /* Select filter policy to accept all advertising */
5541 return 0x00;
5542 }
5543
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005544 if (hci_find_irk_by_addr(hdev, &params->addr,
5545 params->addr_type)) {
5546 /* White list can not be used with RPAs */
5547 return 0x00;
5548 }
5549
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005550 white_list_entries++;
5551 add_to_white_list(req, params);
5552 }
5553
5554 /* Select filter policy to use white list */
5555 return 0x01;
5556}
5557
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005558void hci_req_add_le_passive_scan(struct hci_request *req)
5559{
5560 struct hci_cp_le_set_scan_param param_cp;
5561 struct hci_cp_le_set_scan_enable enable_cp;
5562 struct hci_dev *hdev = req->hdev;
5563 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005564 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005565
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005566 /* Set require_privacy to false since no SCAN_REQ are send
5567 * during passive scanning. Not using an unresolvable address
5568 * here is important so that peer devices using direct
5569 * advertising with our address will be correctly reported
5570 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005571 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005572 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005573 return;
5574
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005575 /* Adding or removing entries from the white list must
5576 * happen before enabling scanning. The controller does
5577 * not allow white list modification while scanning.
5578 */
5579 filter_policy = update_white_list(req);
5580
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005581 memset(&param_cp, 0, sizeof(param_cp));
5582 param_cp.type = LE_SCAN_PASSIVE;
5583 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5584 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5585 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005586 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005587 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5588 &param_cp);
5589
5590 memset(&enable_cp, 0, sizeof(enable_cp));
5591 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005592 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005593 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5594 &enable_cp);
5595}
5596
Andre Guedesa4790db2014-02-26 20:21:47 -03005597static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5598{
5599 if (status)
5600 BT_DBG("HCI request failed to update background scanning: "
5601 "status 0x%2.2x", status);
5602}
5603
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Bail out while the device is down, still being set up or
	 * configured, auto-powering off, or being unregistered.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Run the assembled request asynchronously; failures are only
	 * logged by update_background_scan_complete().
	 */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
Johan Hedberg432df052014-08-01 11:13:31 +03005676
Johan Hedberg22f433d2014-08-01 11:13:32 +03005677static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5678{
5679 struct bdaddr_list *b;
5680
5681 list_for_each_entry(b, &hdev->whitelist, list) {
5682 struct hci_conn *conn;
5683
5684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5685 if (!conn)
5686 return true;
5687
5688 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5689 return true;
5690 }
5691
5692 return false;
5693}
5694
Johan Hedberg432df052014-08-01 11:13:31 +03005695void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5696{
5697 u8 scan;
5698
5699 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5700 return;
5701
5702 if (!hdev_is_powered(hdev))
5703 return;
5704
5705 if (mgmt_powering_down(hdev))
5706 return;
5707
5708 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005709 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005710 scan = SCAN_PAGE;
5711 else
5712 scan = SCAN_DISABLED;
5713
5714 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5715 return;
5716
5717 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5718 scan |= SCAN_INQUIRY;
5719
5720 if (req)
5721 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5722 else
5723 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5724}