blob: 6c162c8809cf8d5f60592742dd6ffff0cd6afe28 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070031#include <linux/debugfs.h>
Johan Hedberg99780a72014-02-18 10:40:07 +020032#include <linux/crypto.h>
Marcel Holtmann47219832013-10-17 17:24:15 -070033#include <asm/unaligned.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070034
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
Johan Hedberg4bc58f52014-05-20 09:45:47 +030037#include <net/bluetooth/l2cap.h>
Marcel Holtmannaf589252014-07-01 14:11:20 +020038#include <net/bluetooth/mgmt.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Johan Hedberg970c4e42014-02-18 10:19:33 +020040#include "smp.h"
41
/* Deferred work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)
65
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
72
Marcel Holtmannbaf27f62013-10-16 03:28:55 -070073/* ---- HCI debugfs entries ---- */
74
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070075static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
76 size_t count, loff_t *ppos)
77{
78 struct hci_dev *hdev = file->private_data;
79 char buf[3];
80
Marcel Holtmann111902f2014-06-21 04:53:17 +020081 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann4b4148e2013-10-19 07:09:12 -070082 buf[1] = '\n';
83 buf[2] = '\0';
84 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
85}
86
87static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
88 size_t count, loff_t *ppos)
89{
90 struct hci_dev *hdev = file->private_data;
91 struct sk_buff *skb;
92 char buf[32];
93 size_t buf_size = min(count, (sizeof(buf)-1));
94 bool enable;
95 int err;
96
97 if (!test_bit(HCI_UP, &hdev->flags))
98 return -ENETDOWN;
99
100 if (copy_from_user(buf, user_buf, buf_size))
101 return -EFAULT;
102
103 buf[buf_size] = '\0';
104 if (strtobool(buf, &enable))
105 return -EINVAL;
106
Marcel Holtmann111902f2014-06-21 04:53:17 +0200107 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700108 return -EALREADY;
109
110 hci_req_lock(hdev);
111 if (enable)
112 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
113 HCI_CMD_TIMEOUT);
114 else
115 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
116 HCI_CMD_TIMEOUT);
117 hci_req_unlock(hdev);
118
119 if (IS_ERR(skb))
120 return PTR_ERR(skb);
121
122 err = -bt_to_errno(skb->data[0]);
123 kfree_skb(skb);
124
125 if (err < 0)
126 return err;
127
Marcel Holtmann111902f2014-06-21 04:53:17 +0200128 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
Marcel Holtmann4b4148e2013-10-19 07:09:12 -0700129
130 return count;
131}
132
133static const struct file_operations dut_mode_fops = {
134 .open = simple_open,
135 .read = dut_mode_read,
136 .write = dut_mode_write,
137 .llseek = default_llseek,
138};
139
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700140static int features_show(struct seq_file *f, void *ptr)
141{
142 struct hci_dev *hdev = f->private;
143 u8 p;
144
145 hci_dev_lock(hdev);
146 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700147 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700148 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
149 hdev->features[p][0], hdev->features[p][1],
150 hdev->features[p][2], hdev->features[p][3],
151 hdev->features[p][4], hdev->features[p][5],
152 hdev->features[p][6], hdev->features[p][7]);
153 }
Marcel Holtmanncfbb2b52013-10-19 02:25:33 -0700154 if (lmp_le_capable(hdev))
155 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
156 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
157 hdev->le_features[0], hdev->le_features[1],
158 hdev->le_features[2], hdev->le_features[3],
159 hdev->le_features[4], hdev->le_features[5],
160 hdev->le_features[6], hdev->le_features[7]);
Marcel Holtmanndfb826a2013-10-18 12:04:46 -0700161 hci_dev_unlock(hdev);
162
163 return 0;
164}
165
166static int features_open(struct inode *inode, struct file *file)
167{
168 return single_open(file, features_show, inode->i_private);
169}
170
171static const struct file_operations features_fops = {
172 .open = features_open,
173 .read = seq_read,
174 .llseek = seq_lseek,
175 .release = single_release,
176};
177
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700178static int blacklist_show(struct seq_file *f, void *p)
179{
180 struct hci_dev *hdev = f->private;
181 struct bdaddr_list *b;
182
183 hci_dev_lock(hdev);
184 list_for_each_entry(b, &hdev->blacklist, list)
Marcel Holtmannb25f0782013-10-17 17:24:20 -0700185 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -0700186 hci_dev_unlock(hdev);
187
188 return 0;
189}
190
191static int blacklist_open(struct inode *inode, struct file *file)
192{
193 return single_open(file, blacklist_show, inode->i_private);
194}
195
196static const struct file_operations blacklist_fops = {
197 .open = blacklist_open,
198 .read = seq_read,
199 .llseek = seq_lseek,
200 .release = single_release,
201};
202
Marcel Holtmann47219832013-10-17 17:24:15 -0700203static int uuids_show(struct seq_file *f, void *p)
204{
205 struct hci_dev *hdev = f->private;
206 struct bt_uuid *uuid;
207
208 hci_dev_lock(hdev);
209 list_for_each_entry(uuid, &hdev->uuids, list) {
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700210 u8 i, val[16];
Marcel Holtmann47219832013-10-17 17:24:15 -0700211
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700212 /* The Bluetooth UUID values are stored in big endian,
213 * but with reversed byte order. So convert them into
214 * the right order for the %pUb modifier.
215 */
216 for (i = 0; i < 16; i++)
217 val[i] = uuid->uuid[15 - i];
Marcel Holtmann47219832013-10-17 17:24:15 -0700218
Marcel Holtmann58f01aa2013-10-19 09:31:59 -0700219 seq_printf(f, "%pUb\n", val);
Marcel Holtmann47219832013-10-17 17:24:15 -0700220 }
221 hci_dev_unlock(hdev);
222
223 return 0;
224}
225
226static int uuids_open(struct inode *inode, struct file *file)
227{
228 return single_open(file, uuids_show, inode->i_private);
229}
230
231static const struct file_operations uuids_fops = {
232 .open = uuids_open,
233 .read = seq_read,
234 .llseek = seq_lseek,
235 .release = single_release,
236};
237
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700238static int inquiry_cache_show(struct seq_file *f, void *p)
239{
240 struct hci_dev *hdev = f->private;
241 struct discovery_state *cache = &hdev->discovery;
242 struct inquiry_entry *e;
243
244 hci_dev_lock(hdev);
245
246 list_for_each_entry(e, &cache->all, all) {
247 struct inquiry_data *data = &e->data;
248 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
249 &data->bdaddr,
250 data->pscan_rep_mode, data->pscan_period_mode,
251 data->pscan_mode, data->dev_class[2],
252 data->dev_class[1], data->dev_class[0],
253 __le16_to_cpu(data->clock_offset),
254 data->rssi, data->ssp_mode, e->timestamp);
255 }
256
257 hci_dev_unlock(hdev);
258
259 return 0;
260}
261
262static int inquiry_cache_open(struct inode *inode, struct file *file)
263{
264 return single_open(file, inquiry_cache_show, inode->i_private);
265}
266
267static const struct file_operations inquiry_cache_fops = {
268 .open = inquiry_cache_open,
269 .read = seq_read,
270 .llseek = seq_lseek,
271 .release = single_release,
272};
273
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700274static int link_keys_show(struct seq_file *f, void *ptr)
275{
276 struct hci_dev *hdev = f->private;
277 struct list_head *p, *n;
278
279 hci_dev_lock(hdev);
280 list_for_each_safe(p, n, &hdev->link_keys) {
281 struct link_key *key = list_entry(p, struct link_key, list);
282 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
283 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
284 }
285 hci_dev_unlock(hdev);
286
287 return 0;
288}
289
290static int link_keys_open(struct inode *inode, struct file *file)
291{
292 return single_open(file, link_keys_show, inode->i_private);
293}
294
295static const struct file_operations link_keys_fops = {
296 .open = link_keys_open,
297 .read = seq_read,
298 .llseek = seq_lseek,
299 .release = single_release,
300};
301
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700302static int dev_class_show(struct seq_file *f, void *ptr)
303{
304 struct hci_dev *hdev = f->private;
305
306 hci_dev_lock(hdev);
307 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
308 hdev->dev_class[1], hdev->dev_class[0]);
309 hci_dev_unlock(hdev);
310
311 return 0;
312}
313
314static int dev_class_open(struct inode *inode, struct file *file)
315{
316 return single_open(file, dev_class_show, inode->i_private);
317}
318
319static const struct file_operations dev_class_fops = {
320 .open = dev_class_open,
321 .read = seq_read,
322 .llseek = seq_lseek,
323 .release = single_release,
324};
325
Marcel Holtmann041000b2013-10-17 12:02:31 -0700326static int voice_setting_get(void *data, u64 *val)
327{
328 struct hci_dev *hdev = data;
329
330 hci_dev_lock(hdev);
331 *val = hdev->voice_setting;
332 hci_dev_unlock(hdev);
333
334 return 0;
335}
336
337DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
338 NULL, "0x%4.4llx\n");
339
Marcel Holtmannebd1e332013-10-17 10:54:46 -0700340static int auto_accept_delay_set(void *data, u64 val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 hdev->auto_accept_delay = val;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351static int auto_accept_delay_get(void *data, u64 *val)
352{
353 struct hci_dev *hdev = data;
354
355 hci_dev_lock(hdev);
356 *val = hdev->auto_accept_delay;
357 hci_dev_unlock(hdev);
358
359 return 0;
360}
361
362DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
363 auto_accept_delay_set, "%llu\n");
364
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800365static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
366 size_t count, loff_t *ppos)
367{
368 struct hci_dev *hdev = file->private_data;
369 char buf[3];
370
Marcel Holtmann111902f2014-06-21 04:53:17 +0200371 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800372 buf[1] = '\n';
373 buf[2] = '\0';
374 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
375}
376
377static ssize_t force_sc_support_write(struct file *file,
378 const char __user *user_buf,
379 size_t count, loff_t *ppos)
380{
381 struct hci_dev *hdev = file->private_data;
382 char buf[32];
383 size_t buf_size = min(count, (sizeof(buf)-1));
384 bool enable;
385
386 if (test_bit(HCI_UP, &hdev->flags))
387 return -EBUSY;
388
389 if (copy_from_user(buf, user_buf, buf_size))
390 return -EFAULT;
391
392 buf[buf_size] = '\0';
393 if (strtobool(buf, &enable))
394 return -EINVAL;
395
Marcel Holtmann111902f2014-06-21 04:53:17 +0200396 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800397 return -EALREADY;
398
Marcel Holtmann111902f2014-06-21 04:53:17 +0200399 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
Marcel Holtmann5afeac142014-01-10 02:07:27 -0800400
401 return count;
402}
403
404static const struct file_operations force_sc_support_fops = {
405 .open = simple_open,
406 .read = force_sc_support_read,
407 .write = force_sc_support_write,
408 .llseek = default_llseek,
409};
410
Marcel Holtmann134c2a82014-01-15 22:37:42 -0800411static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
412 size_t count, loff_t *ppos)
413{
414 struct hci_dev *hdev = file->private_data;
415 char buf[3];
416
417 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
418 buf[1] = '\n';
419 buf[2] = '\0';
420 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
421}
422
423static const struct file_operations sc_only_mode_fops = {
424 .open = simple_open,
425 .read = sc_only_mode_read,
426 .llseek = default_llseek,
427};
428
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700429static int idle_timeout_set(void *data, u64 val)
430{
431 struct hci_dev *hdev = data;
432
433 if (val != 0 && (val < 500 || val > 3600000))
434 return -EINVAL;
435
436 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700437 hdev->idle_timeout = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700438 hci_dev_unlock(hdev);
439
440 return 0;
441}
442
443static int idle_timeout_get(void *data, u64 *val)
444{
445 struct hci_dev *hdev = data;
446
447 hci_dev_lock(hdev);
448 *val = hdev->idle_timeout;
449 hci_dev_unlock(hdev);
450
451 return 0;
452}
453
454DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
455 idle_timeout_set, "%llu\n");
456
Johan Hedbergc982b2e2014-02-23 19:42:26 +0200457static int rpa_timeout_set(void *data, u64 val)
458{
459 struct hci_dev *hdev = data;
460
461 /* Require the RPA timeout to be at least 30 seconds and at most
462 * 24 hours.
463 */
464 if (val < 30 || val > (60 * 60 * 24))
465 return -EINVAL;
466
467 hci_dev_lock(hdev);
468 hdev->rpa_timeout = val;
469 hci_dev_unlock(hdev);
470
471 return 0;
472}
473
474static int rpa_timeout_get(void *data, u64 *val)
475{
476 struct hci_dev *hdev = data;
477
478 hci_dev_lock(hdev);
479 *val = hdev->rpa_timeout;
480 hci_dev_unlock(hdev);
481
482 return 0;
483}
484
485DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
486 rpa_timeout_set, "%llu\n");
487
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700488static int sniff_min_interval_set(void *data, u64 val)
489{
490 struct hci_dev *hdev = data;
491
492 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
493 return -EINVAL;
494
495 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700496 hdev->sniff_min_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700497 hci_dev_unlock(hdev);
498
499 return 0;
500}
501
502static int sniff_min_interval_get(void *data, u64 *val)
503{
504 struct hci_dev *hdev = data;
505
506 hci_dev_lock(hdev);
507 *val = hdev->sniff_min_interval;
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
514 sniff_min_interval_set, "%llu\n");
515
516static int sniff_max_interval_set(void *data, u64 val)
517{
518 struct hci_dev *hdev = data;
519
520 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
521 return -EINVAL;
522
523 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700524 hdev->sniff_max_interval = val;
Marcel Holtmann2bfa3532013-10-17 19:16:02 -0700525 hci_dev_unlock(hdev);
526
527 return 0;
528}
529
530static int sniff_max_interval_get(void *data, u64 *val)
531{
532 struct hci_dev *hdev = data;
533
534 hci_dev_lock(hdev);
535 *val = hdev->sniff_max_interval;
536 hci_dev_unlock(hdev);
537
538 return 0;
539}
540
541DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
542 sniff_max_interval_set, "%llu\n");
543
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +0200544static int conn_info_min_age_set(void *data, u64 val)
545{
546 struct hci_dev *hdev = data;
547
548 if (val == 0 || val > hdev->conn_info_max_age)
549 return -EINVAL;
550
551 hci_dev_lock(hdev);
552 hdev->conn_info_min_age = val;
553 hci_dev_unlock(hdev);
554
555 return 0;
556}
557
558static int conn_info_min_age_get(void *data, u64 *val)
559{
560 struct hci_dev *hdev = data;
561
562 hci_dev_lock(hdev);
563 *val = hdev->conn_info_min_age;
564 hci_dev_unlock(hdev);
565
566 return 0;
567}
568
569DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
570 conn_info_min_age_set, "%llu\n");
571
572static int conn_info_max_age_set(void *data, u64 val)
573{
574 struct hci_dev *hdev = data;
575
576 if (val == 0 || val < hdev->conn_info_min_age)
577 return -EINVAL;
578
579 hci_dev_lock(hdev);
580 hdev->conn_info_max_age = val;
581 hci_dev_unlock(hdev);
582
583 return 0;
584}
585
586static int conn_info_max_age_get(void *data, u64 *val)
587{
588 struct hci_dev *hdev = data;
589
590 hci_dev_lock(hdev);
591 *val = hdev->conn_info_max_age;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
598 conn_info_max_age_set, "%llu\n");
599
Marcel Holtmannac345812014-02-23 12:44:25 -0800600static int identity_show(struct seq_file *f, void *p)
601{
602 struct hci_dev *hdev = f->private;
Johan Hedberga1f4c312014-02-27 14:05:41 +0200603 bdaddr_t addr;
Marcel Holtmannac345812014-02-23 12:44:25 -0800604 u8 addr_type;
605
606 hci_dev_lock(hdev);
607
Johan Hedberga1f4c312014-02-27 14:05:41 +0200608 hci_copy_identity_address(hdev, &addr, &addr_type);
Marcel Holtmannac345812014-02-23 12:44:25 -0800609
Johan Hedberga1f4c312014-02-27 14:05:41 +0200610 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
Marcel Holtmann473deef2c92014-02-23 20:39:23 -0800611 16, hdev->irk, &hdev->rpa);
Marcel Holtmannac345812014-02-23 12:44:25 -0800612
613 hci_dev_unlock(hdev);
614
615 return 0;
616}
617
618static int identity_open(struct inode *inode, struct file *file)
619{
620 return single_open(file, identity_show, inode->i_private);
621}
622
623static const struct file_operations identity_fops = {
624 .open = identity_open,
625 .read = seq_read,
626 .llseek = seq_lseek,
627 .release = single_release,
628};
629
Marcel Holtmann7a4cd512014-02-19 19:52:13 -0800630static int random_address_show(struct seq_file *f, void *p)
631{
632 struct hci_dev *hdev = f->private;
633
634 hci_dev_lock(hdev);
635 seq_printf(f, "%pMR\n", &hdev->random_addr);
636 hci_dev_unlock(hdev);
637
638 return 0;
639}
640
641static int random_address_open(struct inode *inode, struct file *file)
642{
643 return single_open(file, random_address_show, inode->i_private);
644}
645
646static const struct file_operations random_address_fops = {
647 .open = random_address_open,
648 .read = seq_read,
649 .llseek = seq_lseek,
650 .release = single_release,
651};
652
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -0700653static int static_address_show(struct seq_file *f, void *p)
654{
655 struct hci_dev *hdev = f->private;
656
657 hci_dev_lock(hdev);
658 seq_printf(f, "%pMR\n", &hdev->static_addr);
659 hci_dev_unlock(hdev);
660
661 return 0;
662}
663
664static int static_address_open(struct inode *inode, struct file *file)
665{
666 return single_open(file, static_address_show, inode->i_private);
667}
668
669static const struct file_operations static_address_fops = {
670 .open = static_address_open,
671 .read = seq_read,
672 .llseek = seq_lseek,
673 .release = single_release,
674};
675
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800676static ssize_t force_static_address_read(struct file *file,
677 char __user *user_buf,
678 size_t count, loff_t *ppos)
Marcel Holtmann92202182013-10-18 16:38:10 -0700679{
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800680 struct hci_dev *hdev = file->private_data;
681 char buf[3];
Marcel Holtmann92202182013-10-18 16:38:10 -0700682
Marcel Holtmann111902f2014-06-21 04:53:17 +0200683 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800684 buf[1] = '\n';
685 buf[2] = '\0';
686 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
687}
688
689static ssize_t force_static_address_write(struct file *file,
690 const char __user *user_buf,
691 size_t count, loff_t *ppos)
692{
693 struct hci_dev *hdev = file->private_data;
694 char buf[32];
695 size_t buf_size = min(count, (sizeof(buf)-1));
696 bool enable;
697
698 if (test_bit(HCI_UP, &hdev->flags))
699 return -EBUSY;
700
701 if (copy_from_user(buf, user_buf, buf_size))
702 return -EFAULT;
703
704 buf[buf_size] = '\0';
705 if (strtobool(buf, &enable))
Marcel Holtmann92202182013-10-18 16:38:10 -0700706 return -EINVAL;
707
Marcel Holtmann111902f2014-06-21 04:53:17 +0200708 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800709 return -EALREADY;
Marcel Holtmann92202182013-10-18 16:38:10 -0700710
Marcel Holtmann111902f2014-06-21 04:53:17 +0200711 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800712
713 return count;
Marcel Holtmann92202182013-10-18 16:38:10 -0700714}
715
Marcel Holtmannb32bba62014-02-19 19:31:26 -0800716static const struct file_operations force_static_address_fops = {
717 .open = simple_open,
718 .read = force_static_address_read,
719 .write = force_static_address_write,
720 .llseek = default_llseek,
721};
Marcel Holtmann92202182013-10-18 16:38:10 -0700722
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -0800723static int white_list_show(struct seq_file *f, void *ptr)
724{
725 struct hci_dev *hdev = f->private;
726 struct bdaddr_list *b;
727
728 hci_dev_lock(hdev);
729 list_for_each_entry(b, &hdev->le_white_list, list)
730 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
731 hci_dev_unlock(hdev);
732
733 return 0;
734}
735
736static int white_list_open(struct inode *inode, struct file *file)
737{
738 return single_open(file, white_list_show, inode->i_private);
739}
740
741static const struct file_operations white_list_fops = {
742 .open = white_list_open,
743 .read = seq_read,
744 .llseek = seq_lseek,
745 .release = single_release,
746};
747
Marcel Holtmann3698d702014-02-18 21:54:49 -0800748static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
749{
750 struct hci_dev *hdev = f->private;
751 struct list_head *p, *n;
752
753 hci_dev_lock(hdev);
754 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
755 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
756 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
757 &irk->bdaddr, irk->addr_type,
758 16, irk->val, &irk->rpa);
759 }
760 hci_dev_unlock(hdev);
761
762 return 0;
763}
764
765static int identity_resolving_keys_open(struct inode *inode, struct file *file)
766{
767 return single_open(file, identity_resolving_keys_show,
768 inode->i_private);
769}
770
771static const struct file_operations identity_resolving_keys_fops = {
772 .open = identity_resolving_keys_open,
773 .read = seq_read,
774 .llseek = seq_lseek,
775 .release = single_release,
776};
777
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700778static int long_term_keys_show(struct seq_file *f, void *ptr)
779{
780 struct hci_dev *hdev = f->private;
781 struct list_head *p, *n;
782
783 hci_dev_lock(hdev);
Johan Hedbergf813f1b2014-01-30 19:39:57 -0800784 list_for_each_safe(p, n, &hdev->long_term_keys) {
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700785 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800786 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700787 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
788 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -0800789 __le64_to_cpu(ltk->rand), 16, ltk->val);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -0700790 }
791 hci_dev_unlock(hdev);
792
793 return 0;
794}
795
796static int long_term_keys_open(struct inode *inode, struct file *file)
797{
798 return single_open(file, long_term_keys_show, inode->i_private);
799}
800
801static const struct file_operations long_term_keys_fops = {
802 .open = long_term_keys_open,
803 .read = seq_read,
804 .llseek = seq_lseek,
805 .release = single_release,
806};
807
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700808static int conn_min_interval_set(void *data, u64 val)
809{
810 struct hci_dev *hdev = data;
811
812 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
813 return -EINVAL;
814
815 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700816 hdev->le_conn_min_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700817 hci_dev_unlock(hdev);
818
819 return 0;
820}
821
822static int conn_min_interval_get(void *data, u64 *val)
823{
824 struct hci_dev *hdev = data;
825
826 hci_dev_lock(hdev);
827 *val = hdev->le_conn_min_interval;
828 hci_dev_unlock(hdev);
829
830 return 0;
831}
832
833DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
834 conn_min_interval_set, "%llu\n");
835
836static int conn_max_interval_set(void *data, u64 val)
837{
838 struct hci_dev *hdev = data;
839
840 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
841 return -EINVAL;
842
843 hci_dev_lock(hdev);
Marcel Holtmann2be48b62013-10-19 10:19:15 -0700844 hdev->le_conn_max_interval = val;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -0700845 hci_dev_unlock(hdev);
846
847 return 0;
848}
849
850static int conn_max_interval_get(void *data, u64 *val)
851{
852 struct hci_dev *hdev = data;
853
854 hci_dev_lock(hdev);
855 *val = hdev->le_conn_max_interval;
856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
862 conn_max_interval_set, "%llu\n");
863
Marcel Holtmann816a93d2014-06-30 12:34:37 +0200864static int conn_latency_set(void *data, u64 val)
865{
866 struct hci_dev *hdev = data;
867
868 if (val > 0x01f3)
869 return -EINVAL;
870
871 hci_dev_lock(hdev);
872 hdev->le_conn_latency = val;
873 hci_dev_unlock(hdev);
874
875 return 0;
876}
877
878static int conn_latency_get(void *data, u64 *val)
879{
880 struct hci_dev *hdev = data;
881
882 hci_dev_lock(hdev);
883 *val = hdev->le_conn_latency;
884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
890 conn_latency_set, "%llu\n");
891
Marcel Holtmannf1649572014-06-30 12:34:38 +0200892static int supervision_timeout_set(void *data, u64 val)
893{
894 struct hci_dev *hdev = data;
895
896 if (val < 0x000a || val > 0x0c80)
897 return -EINVAL;
898
899 hci_dev_lock(hdev);
900 hdev->le_supv_timeout = val;
901 hci_dev_unlock(hdev);
902
903 return 0;
904}
905
906static int supervision_timeout_get(void *data, u64 *val)
907{
908 struct hci_dev *hdev = data;
909
910 hci_dev_lock(hdev);
911 *val = hdev->le_supv_timeout;
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
917DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
918 supervision_timeout_set, "%llu\n");
919
Marcel Holtmann3f959d42014-02-20 11:55:56 -0800920static int adv_channel_map_set(void *data, u64 val)
921{
922 struct hci_dev *hdev = data;
923
924 if (val < 0x01 || val > 0x07)
925 return -EINVAL;
926
927 hci_dev_lock(hdev);
928 hdev->le_adv_channel_map = val;
929 hci_dev_unlock(hdev);
930
931 return 0;
932}
933
934static int adv_channel_map_get(void *data, u64 *val)
935{
936 struct hci_dev *hdev = data;
937
938 hci_dev_lock(hdev);
939 *val = hdev->le_adv_channel_map;
940 hci_dev_unlock(hdev);
941
942 return 0;
943}
944
945DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
946 adv_channel_map_set, "%llu\n");
947
Georg Lukas729a1052014-07-26 13:59:58 +0200948static int adv_min_interval_set(void *data, u64 val)
Jukka Rissanen89863102013-12-11 17:05:38 +0200949{
Georg Lukas729a1052014-07-26 13:59:58 +0200950 struct hci_dev *hdev = data;
Jukka Rissanen89863102013-12-11 17:05:38 +0200951
Georg Lukas729a1052014-07-26 13:59:58 +0200952 if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
Jukka Rissanen89863102013-12-11 17:05:38 +0200953 return -EINVAL;
954
Andre Guedes7d474e02014-02-26 20:21:54 -0300955 hci_dev_lock(hdev);
Georg Lukas729a1052014-07-26 13:59:58 +0200956 hdev->le_adv_min_interval = val;
Andre Guedes7d474e02014-02-26 20:21:54 -0300957 hci_dev_unlock(hdev);
958
959 return 0;
960}
961
Georg Lukas729a1052014-07-26 13:59:58 +0200962static int adv_min_interval_get(void *data, u64 *val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300963{
Georg Lukas729a1052014-07-26 13:59:58 +0200964 struct hci_dev *hdev = data;
965
966 hci_dev_lock(hdev);
967 *val = hdev->le_adv_min_interval;
968 hci_dev_unlock(hdev);
969
970 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -0300971}
972
Georg Lukas729a1052014-07-26 13:59:58 +0200973DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
974 adv_min_interval_set, "%llu\n");
975
976static int adv_max_interval_set(void *data, u64 val)
Andre Guedes7d474e02014-02-26 20:21:54 -0300977{
Georg Lukas729a1052014-07-26 13:59:58 +0200978 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300979
Georg Lukas729a1052014-07-26 13:59:58 +0200980 if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
Andre Guedes7d474e02014-02-26 20:21:54 -0300981 return -EINVAL;
982
Georg Lukas729a1052014-07-26 13:59:58 +0200983 hci_dev_lock(hdev);
984 hdev->le_adv_max_interval = val;
985 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300986
Georg Lukas729a1052014-07-26 13:59:58 +0200987 return 0;
988}
Andre Guedes7d474e02014-02-26 20:21:54 -0300989
Georg Lukas729a1052014-07-26 13:59:58 +0200990static int adv_max_interval_get(void *data, u64 *val)
991{
992 struct hci_dev *hdev = data;
Andre Guedes7d474e02014-02-26 20:21:54 -0300993
Georg Lukas729a1052014-07-26 13:59:58 +0200994 hci_dev_lock(hdev);
995 *val = hdev->le_adv_max_interval;
996 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -0300997
Georg Lukas729a1052014-07-26 13:59:58 +0200998 return 0;
999}
Andre Guedes7d474e02014-02-26 20:21:54 -03001000
Georg Lukas729a1052014-07-26 13:59:58 +02001001DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
1002 adv_max_interval_set, "%llu\n");
Andre Guedes7d474e02014-02-26 20:21:54 -03001003
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001004static int device_list_show(struct seq_file *f, void *ptr)
Andre Guedes7d474e02014-02-26 20:21:54 -03001005{
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001006 struct hci_dev *hdev = f->private;
Andre Guedes7d474e02014-02-26 20:21:54 -03001007 struct hci_conn_params *p;
Marcel Holtmann40f49382014-11-02 21:46:52 +01001008 struct bdaddr_list *b;
Andre Guedes7d474e02014-02-26 20:21:54 -03001009
Andre Guedes7d474e02014-02-26 20:21:54 -03001010 hci_dev_lock(hdev);
Marcel Holtmann40f49382014-11-02 21:46:52 +01001011 list_for_each_entry(b, &hdev->whitelist, list)
1012 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
Andre Guedes7d474e02014-02-26 20:21:54 -03001013 list_for_each_entry(p, &hdev->le_conn_params, list) {
Marcel Holtmann40f49382014-11-02 21:46:52 +01001014 seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
Andre Guedes7d474e02014-02-26 20:21:54 -03001015 p->auto_connect);
Andre Guedes7d474e02014-02-26 20:21:54 -03001016 }
Andre Guedes7d474e02014-02-26 20:21:54 -03001017 hci_dev_unlock(hdev);
Andre Guedes7d474e02014-02-26 20:21:54 -03001018
Andre Guedes7d474e02014-02-26 20:21:54 -03001019 return 0;
Andre Guedes7d474e02014-02-26 20:21:54 -03001020}
1021
/* debugfs open: bind device_list_show to this file via seq_file */
static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}
1026
/* File operations for the read-only "device_list" debugfs entry */
static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1033
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034/* ---- HCI requests ---- */
1035
Johan Hedberg42c6b122013-03-05 20:37:49 +02001036static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001038 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001039
1040 if (hdev->req_status == HCI_REQ_PEND) {
1041 hdev->req_result = result;
1042 hdev->req_status = HCI_REQ_DONE;
1043 wake_up_interruptible(&hdev->req_wait_q);
1044 }
1045}
1046
1047static void hci_req_cancel(struct hci_dev *hdev, int err)
1048{
1049 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1050
1051 if (hdev->req_status == HCI_REQ_PEND) {
1052 hdev->req_result = err;
1053 hdev->req_status = HCI_REQ_CANCELED;
1054 wake_up_interruptible(&hdev->req_wait_q);
1055 }
1056}
1057
/* Take ownership of the last received HCI event (hdev->recv_evt) and
 * validate that it matches the synchronous command that just completed.
 *
 * If @event is non-zero, any event of that type is accepted.  Otherwise
 * the event must be a Command Complete whose opcode matches @opcode.
 *
 * Returns the skb (ownership transferred to the caller) on success, or
 * ERR_PTR(-ENODATA) on mismatch or missing data; the skb is freed on
 * all failure paths.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Detach recv_evt under the lock so a concurrent event cannot
	 * be seen by two readers.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event type: accept it as-is */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	/* Strip the Command Complete header; the remaining payload is
	 * the command's return parameters.
	 */
	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
1112
/* Send a single HCI command and sleep until its completion event
 * arrives or @timeout (in jiffies) expires.  Must be called from
 * process context.
 *
 * Returns the completion event skb on success (caller owns and must
 * free it), or an ERR_PTR on failure (-EINTR, -ETIMEDOUT, translated
 * controller status, or -ENODATA from hci_get_cmd_complete()).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	/* Join the wait queue and mark ourselves interruptible BEFORE
	 * running the request, so a completion that fires immediately
	 * cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal, req_status is left untouched here;
	 * presumably it is reset by a later completion or the next
	 * request setup — confirm against the callers.
	 */
	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code, translated to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
1168
/* Send a single HCI command synchronously and wait for its Command
 * Complete event.  Convenience wrapper around __hci_cmd_sync_ev()
 * with no specific event requested (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1175
/* Execute request and wait for completion.
 *
 * Builds an HCI request via @func, runs it and sleeps until the
 * completion callback fires or @timeout (jiffies) expires.  The caller
 * must hold the request lock (see hci_req_sync()).  Returns 0 on
 * success or a negative errno (-EINTR, -ETIMEDOUT, translated
 * controller status).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its commands into the request */
	func(&req, opt);

	/* Join the wait queue before running the request so an
	 * immediate completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code, translated to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* hci_req_cancel() stored a positive errno in req_result */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
1241
Johan Hedberg01178cd2013-03-05 20:37:41 +02001242static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +02001243 void (*req)(struct hci_request *req,
1244 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +02001245 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246{
1247 int ret;
1248
Marcel Holtmann7c6a3292008-09-12 03:11:54 +02001249 if (!test_bit(HCI_UP, &hdev->flags))
1250 return -ENETDOWN;
1251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 /* Serialize all requests */
1253 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001254 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255 hci_req_unlock(hdev);
1256
1257 return ret;
1258}
1259
Johan Hedberg42c6b122013-03-05 20:37:49 +02001260static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001262 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263
1264 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001265 set_bit(HCI_RESET, &req->hdev->flags);
1266 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267}
1268
/* Stage-1 init for BR/EDR controllers: packet-based flow control and
 * the basic identity queries every controller must answer.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1282
/* Stage-1 init for AMP controllers: block-based flow control and the
 * AMP-specific capability queries.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1308
/* First init stage: optionally reset the controller, then dispatch to
 * the transport-specific stage-1 setup based on the device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1333
/* Stage-2 setup for BR/EDR capable controllers: read back the basic
 * configuration and establish sane filter/timeout defaults.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1375
/* Stage-2 setup for LE capable controllers: query LE capabilities and
 * start from an empty white list.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1399
1400static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1401{
1402 if (lmp_ext_inq_capable(hdev))
1403 return 0x02;
1404
1405 if (lmp_inq_rssi_capable(hdev))
1406 return 0x01;
1407
1408 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1409 hdev->lmp_subver == 0x0757)
1410 return 0x01;
1411
1412 if (hdev->manufacturer == 15) {
1413 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1414 return 0x01;
1415 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1416 return 0x01;
1417 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1418 return 0x01;
1419 }
1420
1421 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1422 hdev->lmp_subver == 0x1805)
1423 return 0x01;
1424
1425 return 0x00;
1426}
1427
Johan Hedberg42c6b122013-03-05 20:37:49 +02001428static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001429{
1430 u8 mode;
1431
Johan Hedberg42c6b122013-03-05 20:37:49 +02001432 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001433
Johan Hedberg42c6b122013-03-05 20:37:49 +02001434 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001435}
1436
/* Build and queue the HCI Set Event Mask based on the controller's
 * capabilities; each byte/bit corresponds to one HCI event.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
1513
/* Second init stage: transport-specific setup plus feature probing
 * that depends on the results of stage 1 (version, features).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP not enabled by the host: clear the EIR data */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		/* Fetch the first extended features page; further pages
		 * are read in hci_init3_req() once max_page is known.
		 */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1575
Johan Hedberg42c6b122013-03-05 20:37:49 +02001576static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001577{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001578 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001579 struct hci_cp_write_def_link_policy cp;
1580 u16 link_policy = 0;
1581
1582 if (lmp_rswitch_capable(hdev))
1583 link_policy |= HCI_LP_RSWITCH;
1584 if (lmp_hold_capable(hdev))
1585 link_policy |= HCI_LP_HOLD;
1586 if (lmp_sniff_capable(hdev))
1587 link_policy |= HCI_LP_SNIFF;
1588 if (lmp_park_capable(hdev))
1589 link_policy |= HCI_LP_PARK;
1590
1591 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001592 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001593}
1594
/* Sync the controller's LE Host Supported setting with the host's
 * HCI_LE_ENABLED flag, sending the command only when they differ.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	/* Only write when the controller's current state differs */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1615
/* Build and queue event mask page 2, covering Connectionless Slave
 * Broadcast and Authenticated Payload Timeout events.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1647
/* Third init stage: event masks, stored link key cleanup, link policy,
 * LE event configuration and remaining extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		/* Default LE meta events: connection complete, advertising
		 * report, connection update and read remote features.
		 */
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1718
/* Fourth and final stage of the controller init sequence. By this
 * point the local supported-commands and features masks have been
 * read, so only issue the optional commands the controller actually
 * claims to support (hdev->commands[] bits are from the Read Local
 * Supported Commands response).
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured (the
	 * HCI_FORCE_SC debug flag allows enabling it even when the
	 * controller does not advertise SC capability).
	 */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1748
/* Run the full synchronous controller initialization: stages 1-4 of
 * the HCI init request sequence, then (only during the initial
 * HCI_SETUP phase) create the per-controller debugfs entries that
 * match the controller's capabilities.
 *
 * Returns 0 on success or the negative error from the first failing
 * init stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries valid for every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Secure Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning knobs */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries; SMP is registered here as well */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}
1887
/* Stage-zero init request used for unconfigured controllers:
 * optionally reset, then read just enough identifying information
 * (local version, plus BD address when the driver can program one).
 */
static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address; only useful when the driver provides a
	 * set_bdaddr callback to change it afterwards.
	 */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1905
1906static int __hci_unconf_init(struct hci_dev *hdev)
1907{
1908 int err;
1909
Marcel Holtmanncc78b442014-07-06 13:43:20 +02001910 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1911 return 0;
1912
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02001913 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
1914 if (err < 0)
1915 return err;
1916
1917 return 0;
1918}
1919
Johan Hedberg42c6b122013-03-05 20:37:49 +02001920static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921{
1922 __u8 scan = opt;
1923
Johan Hedberg42c6b122013-03-05 20:37:49 +02001924 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
1926 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001927 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928}
1929
Johan Hedberg42c6b122013-03-05 20:37:49 +02001930static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931{
1932 __u8 auth = opt;
1933
Johan Hedberg42c6b122013-03-05 20:37:49 +02001934 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935
1936 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001937 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938}
1939
Johan Hedberg42c6b122013-03-05 20:37:49 +02001940static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941{
1942 __u8 encrypt = opt;
1943
Johan Hedberg42c6b122013-03-05 20:37:49 +02001944 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001946 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001947 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948}
1949
Johan Hedberg42c6b122013-03-05 20:37:49 +02001950static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001951{
1952 __le16 policy = cpu_to_le16(opt);
1953
Johan Hedberg42c6b122013-03-05 20:37:49 +02001954 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001955
1956 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001957 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001958}
1959
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001960/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 * Device is held on return. */
1962struct hci_dev *hci_dev_get(int index)
1963{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001964 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965
1966 BT_DBG("%d", index);
1967
1968 if (index < 0)
1969 return NULL;
1970
1971 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001972 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973 if (d->id == index) {
1974 hdev = hci_dev_hold(d);
1975 break;
1976 }
1977 }
1978 read_unlock(&hci_dev_list_lock);
1979 return hdev;
1980}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001983
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001984bool hci_discovery_active(struct hci_dev *hdev)
1985{
1986 struct discovery_state *discov = &hdev->discovery;
1987
Andre Guedes6fbe1952012-02-03 17:47:58 -03001988 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001989 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001990 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001991 return true;
1992
Andre Guedes6fbe1952012-02-03 17:47:58 -03001993 default:
1994 return false;
1995 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001996}
1997
/* Transition the discovery state machine to @state, emitting the
 * corresponding mgmt "discovering" events.  No-op if the state does
 * not actually change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		/* Only report "discovery stopped" if discovery actually
		 * got going; STARTING -> STOPPED means it never did.
		 */
		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Devices are now being found: report discovery active */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}
2027
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002028void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
Johan Hedberg30883512012-01-04 14:16:21 +02002030 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002031 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
Johan Hedberg561aafb2012-01-04 13:31:59 +02002033 list_for_each_entry_safe(p, n, &cache->all, all) {
2034 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002035 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02002037
2038 INIT_LIST_HEAD(&cache->unknown);
2039 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040}
2041
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002042struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2043 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
Johan Hedberg30883512012-01-04 14:16:21 +02002045 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 struct inquiry_entry *e;
2047
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002048 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
Johan Hedberg561aafb2012-01-04 13:31:59 +02002050 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002052 return e;
2053 }
2054
2055 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056}
2057
Johan Hedberg561aafb2012-01-04 13:31:59 +02002058struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002059 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02002060{
Johan Hedberg30883512012-01-04 14:16:21 +02002061 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02002062 struct inquiry_entry *e;
2063
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002064 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02002065
2066 list_for_each_entry(e, &cache->unknown, list) {
2067 if (!bacmp(&e->data.bdaddr, bdaddr))
2068 return e;
2069 }
2070
2071 return NULL;
2072}
2073
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002074struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002075 bdaddr_t *bdaddr,
2076 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002077{
2078 struct discovery_state *cache = &hdev->discovery;
2079 struct inquiry_entry *e;
2080
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002081 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02002082
2083 list_for_each_entry(e, &cache->resolve, list) {
2084 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2085 return e;
2086 if (!bacmp(&e->data.bdaddr, bdaddr))
2087 return e;
2088 }
2089
2090 return NULL;
2091}
2092
Johan Hedberga3d4e202012-01-09 00:53:02 +02002093void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002094 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02002095{
2096 struct discovery_state *cache = &hdev->discovery;
2097 struct list_head *pos = &cache->resolve;
2098 struct inquiry_entry *p;
2099
2100 list_del(&ie->list);
2101
2102 list_for_each_entry(p, &cache->resolve, list) {
2103 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002104 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02002105 break;
2106 pos = &p->list;
2107 }
2108
2109 list_add(&ie->list, pos);
2110}
2111
/* Insert or refresh the inquiry cache entry for the device described
 * by @data.  @name_known indicates whether the remote name is already
 * available to the caller.
 *
 * Returns MGMT_DEV_FOUND_* flags for the device-found event:
 * LEGACY_PAIRING when neither the new nor the cached data indicates
 * SSP support, and CONFIRM_NAME when the name still needs resolving
 * (or no cache entry could be allocated).
 */
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* Fresh inquiry data invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		/* A changed RSSI may reorder the pending name-resolve
		 * queue (strongest-signal-first), so re-sort the entry.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known for an entry that was still queued for
	 * resolving: mark it known and take it off its sub-list.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}
2173
2174static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2175{
Johan Hedberg30883512012-01-04 14:16:21 +02002176 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002177 struct inquiry_info *info = (struct inquiry_info *) buf;
2178 struct inquiry_entry *e;
2179 int copied = 0;
2180
Johan Hedberg561aafb2012-01-04 13:31:59 +02002181 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002183
2184 if (copied >= num)
2185 break;
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 bacpy(&info->bdaddr, &data->bdaddr);
2188 info->pscan_rep_mode = data->pscan_rep_mode;
2189 info->pscan_period_mode = data->pscan_period_mode;
2190 info->pscan_mode = data->pscan_mode;
2191 memcpy(info->dev_class, data->dev_class, 3);
2192 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002193
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002195 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196 }
2197
2198 BT_DBG("cache %p, copied %d", cache, copied);
2199 return copied;
2200}
2201
Johan Hedberg42c6b122013-03-05 20:37:49 +02002202static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203{
2204 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002205 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 struct hci_cp_inquiry cp;
2207
2208 BT_DBG("%s", hdev->name);
2209
2210 if (test_bit(HCI_INQUIRY, &hdev->flags))
2211 return;
2212
2213 /* Start Inquiry */
2214 memcpy(&cp.lap, &ir->lap, 3);
2215 cp.length = ir->length;
2216 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002217 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218}
2219
2220int hci_inquiry(void __user *arg)
2221{
2222 __u8 __user *ptr = arg;
2223 struct hci_inquiry_req ir;
2224 struct hci_dev *hdev;
2225 int err = 0, do_inquiry = 0, max_rsp;
2226 long timeo;
2227 __u8 *buf;
2228
2229 if (copy_from_user(&ir, ptr, sizeof(ir)))
2230 return -EFAULT;
2231
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002232 hdev = hci_dev_get(ir.dev_id);
2233 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 return -ENODEV;
2235
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002236 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2237 err = -EBUSY;
2238 goto done;
2239 }
2240
Marcel Holtmann4a964402014-07-02 19:10:33 +02002241 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002242 err = -EOPNOTSUPP;
2243 goto done;
2244 }
2245
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002246 if (hdev->dev_type != HCI_BREDR) {
2247 err = -EOPNOTSUPP;
2248 goto done;
2249 }
2250
Johan Hedberg56f87902013-10-02 13:43:13 +03002251 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2252 err = -EOPNOTSUPP;
2253 goto done;
2254 }
2255
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002256 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002257 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002258 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002259 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 do_inquiry = 1;
2261 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002262 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
Marcel Holtmann04837f62006-07-03 10:02:33 +02002264 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002265
2266 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002267 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2268 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002269 if (err < 0)
2270 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002271
2272 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2273 * cleared). If it is interrupted by a signal, return -EINTR.
2274 */
NeilBrown74316202014-07-07 15:16:04 +10002275 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
Andre Guedes3e13fa12013-03-27 20:04:56 -03002276 TASK_INTERRUPTIBLE))
2277 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002278 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002280 /* for unlimited number of responses we will use buffer with
2281 * 255 entries
2282 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2284
2285 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2286 * copy it to the user space.
2287 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002288 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002289 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 err = -ENOMEM;
2291 goto done;
2292 }
2293
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002294 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002296 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
2298 BT_DBG("num_rsp %d", ir.num_rsp);
2299
2300 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2301 ptr += sizeof(ir);
2302 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002303 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002305 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306 err = -EFAULT;
2307
2308 kfree(buf);
2309
2310done:
2311 hci_dev_put(hdev);
2312 return err;
2313}
2314
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002315static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 int ret = 0;
2318
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319 BT_DBG("%s %p", hdev->name, hdev);
2320
2321 hci_req_lock(hdev);
2322
Johan Hovold94324962012-03-15 14:48:41 +01002323 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2324 ret = -ENODEV;
2325 goto done;
2326 }
2327
Marcel Holtmannd603b762014-07-06 12:11:14 +02002328 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2329 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002330 /* Check for rfkill but allow the HCI setup stage to
2331 * proceed (which in itself doesn't cause any RF activity).
2332 */
2333 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2334 ret = -ERFKILL;
2335 goto done;
2336 }
2337
2338 /* Check for valid public address or a configured static
2339 * random adddress, but let the HCI setup proceed to
2340 * be able to determine if there is a public address
2341 * or not.
2342 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002343 * In case of user channel usage, it is not important
2344 * if a public address or static random address is
2345 * available.
2346 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002347 * This check is only valid for BR/EDR controllers
2348 * since AMP controllers do not have an address.
2349 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002350 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2351 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002352 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2353 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2354 ret = -EADDRNOTAVAIL;
2355 goto done;
2356 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002357 }
2358
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359 if (test_bit(HCI_UP, &hdev->flags)) {
2360 ret = -EALREADY;
2361 goto done;
2362 }
2363
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364 if (hdev->open(hdev)) {
2365 ret = -EIO;
2366 goto done;
2367 }
2368
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002369 atomic_set(&hdev->cmd_cnt, 1);
2370 set_bit(HCI_INIT, &hdev->flags);
2371
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002372 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2373 if (hdev->setup)
2374 ret = hdev->setup(hdev);
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002375
Marcel Holtmannaf202f82014-07-04 17:23:34 +02002376 /* The transport driver can set these quirks before
2377 * creating the HCI device or in its setup callback.
2378 *
2379 * In case any of them is set, the controller has to
2380 * start up as unconfigured.
2381 */
Marcel Holtmanneb1904f2014-07-04 17:23:33 +02002382 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2383 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002384 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
Marcel Holtmann0ebca7d2014-07-05 10:48:02 +02002385
2386 /* For an unconfigured controller it is required to
2387 * read at least the version information provided by
2388 * the Read Local Version Information command.
2389 *
2390 * If the set_bdaddr driver callback is provided, then
2391 * also the original Bluetooth public device address
2392 * will be read using the Read BD Address command.
2393 */
2394 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2395 ret = __hci_unconf_init(hdev);
Marcel Holtmann89bc22d2014-07-04 16:54:37 +02002396 }
2397
Marcel Holtmann9713c172014-07-06 12:11:15 +02002398 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2399 /* If public address change is configured, ensure that
2400 * the address gets programmed. If the driver does not
2401 * support changing the public address, fail the power
2402 * on procedure.
2403 */
2404 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2405 hdev->set_bdaddr)
Marcel Holtmann24c457e2014-07-02 00:53:47 +02002406 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2407 else
2408 ret = -EADDRNOTAVAIL;
2409 }
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002410
2411 if (!ret) {
Marcel Holtmann4a964402014-07-02 19:10:33 +02002412 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002413 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002414 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002415 }
2416
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002417 clear_bit(HCI_INIT, &hdev->flags);
2418
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 if (!ret) {
2420 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002421 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 set_bit(HCI_UP, &hdev->flags);
2423 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002424 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmannd603b762014-07-06 12:11:14 +02002425 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
Marcel Holtmann4a964402014-07-02 19:10:33 +02002426 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002427 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002428 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002429 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002430 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002431 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002432 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002433 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002435 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002436 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002437 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002438
2439 skb_queue_purge(&hdev->cmd_q);
2440 skb_queue_purge(&hdev->rx_q);
2441
2442 if (hdev->flush)
2443 hdev->flush(hdev);
2444
2445 if (hdev->sent_cmd) {
2446 kfree_skb(hdev->sent_cmd);
2447 hdev->sent_cmd = NULL;
2448 }
2449
2450 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002451 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 }
2453
2454done:
2455 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 return ret;
2457}
2458
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002459/* ---- HCI ioctl helpers ---- */
2460
/* Power on an HCI device on behalf of an ioctl or mgmt request.
 * Acquires and releases its own reference on the device. Returns 0 on
 * success or a negative errno (-ENODEV, -EOPNOTSUPP, or the result of
 * hci_dev_do_open).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result into a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2515
/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	/* Walk every stored LE connection parameter entry, releasing any
	 * connection it still references and detaching it from the
	 * pending-action list.
	 */
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			/* Drop the connection and release our reference
			 * before forgetting the pointer.
			 */
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}
2532
/* Bring an HCI device fully down: flush pending work, tear down
 * connections, optionally reset the controller, close the transport and
 * clear the runtime state. Returns 0; if the device is already down only
 * the command timer is cancelled.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A pending deferred power-off must not run after we close. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: just stop the command timer. */
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a running discoverable timeout and drop the
	 * discoverable flags it would have cleared.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Flush discovery results and tear down all connections while
	 * holding the device lock.
	 */
	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Only issue an HCI_Reset when the quirk requests it and the
	 * device is neither auto-off managed nor unconfigured.
	 */
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed here. */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2635
2636int hci_dev_close(__u16 dev)
2637{
2638 struct hci_dev *hdev;
2639 int err;
2640
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002641 hdev = hci_dev_get(dev);
2642 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002644
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002645 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2646 err = -EBUSY;
2647 goto done;
2648 }
2649
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002650 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2651 cancel_delayed_work(&hdev->power_off);
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002654
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002655done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656 hci_dev_put(hdev);
2657 return err;
2658}
2659
/* ioctl entry point for resetting a running HCI device: purge queues,
 * flush discovery and connection state, then issue a synchronous
 * HCI_Reset. Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Reset only makes sense on a device that is up. */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* User-channel devices are controlled exclusively by userspace. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear the per-type packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2708
2709int hci_dev_reset_stat(__u16 dev)
2710{
2711 struct hci_dev *hdev;
2712 int ret = 0;
2713
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002714 hdev = hci_dev_get(dev);
2715 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002716 return -ENODEV;
2717
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002718 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2719 ret = -EBUSY;
2720 goto done;
2721 }
2722
Marcel Holtmann4a964402014-07-02 19:10:33 +02002723 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002724 ret = -EOPNOTSUPP;
2725 goto done;
2726 }
2727
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2729
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002730done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002732 return ret;
2733}
2734
/* Mirror a scan-enable value set through the HCISETSCAN ioctl into the
 * HCI_CONNECTABLE/HCI_DISCOVERABLE dev_flags and, if the management
 * interface is active, notify it of any resulting settings change.
 */
static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	/* test_and_set/clear tell us whether the flag actually flipped,
	 * so we only emit mgmt events on real changes.
	 */
	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		/* Limited discoverable does not survive inquiry scan
		 * being switched off.
		 */
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}
2770
/* Dispatch the legacy HCISET* ioctls that tweak per-device settings.
 * Copies a struct hci_dev_req from userspace, validates the device
 * state and applies the requested change. Returns 0 or a negative
 * errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Device owned by a user channel socket: no ioctl control. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* These settings only apply to BR/EDR controllers ... */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* ... and only while BR/EDR operation is enabled. */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the packet count in the low 16 bits and
		 * the MTU in the high 16 bits.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, but for SCO. */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2872
/* HCIGETDEVLIST ioctl helper: copy the id and flags of up to dev_num
 * registered devices back to userspace. Returns 0 on success or a
 * negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and anything that would make the buffer exceed
	 * two pages.
	 */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2922
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for one device
 * and copy it back to userspace. Returns 0 on success or a negative
 * errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus type in the low nibble and device type above it. */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE values in the ACL slots
		 * and no SCO support.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2975
2976/* ---- Interface to HCI drivers ---- */
2977
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002978static int hci_rfkill_set_block(void *data, bool blocked)
2979{
2980 struct hci_dev *hdev = data;
2981
2982 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2983
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002984 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2985 return -EBUSY;
2986
Johan Hedberg5e130362013-09-13 08:58:17 +03002987 if (blocked) {
2988 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Marcel Holtmannd603b762014-07-06 12:11:14 +02002989 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2990 !test_bit(HCI_CONFIG, &hdev->dev_flags))
Johan Hedbergbf543032013-09-13 08:58:18 +03002991 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002992 } else {
2993 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002994 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002995
2996 return 0;
2997}
2998
/* rfkill operations for HCI devices; only blocking is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
3002
/* Deferred power-on work (hdev->power_on): open the device, re-check
 * error conditions that were ignored during setup, and emit the
 * appropriate mgmt index events for the SETUP/CONFIG transitions.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Schedule the automatic power-off if nobody claims the
		 * device in time.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}
3061
/* Deferred power-off work (hdev->power_off): simply close the device */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
3071
/* Discoverable-timeout work (hdev->discov_off): let mgmt turn the
 * discoverable mode back off.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}
3082
Johan Hedberg35f74982014-02-18 17:14:32 +02003083void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003084{
Johan Hedberg48210022013-01-27 00:31:28 +02003085 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003086
Johan Hedberg48210022013-01-27 00:31:28 +02003087 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3088 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003089 kfree(uuid);
3090 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003091}
3092
Johan Hedberg35f74982014-02-18 17:14:32 +02003093void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003094{
3095 struct list_head *p, *n;
3096
3097 list_for_each_safe(p, n, &hdev->link_keys) {
3098 struct link_key *key;
3099
3100 key = list_entry(p, struct link_key, list);
3101
3102 list_del(p);
3103 kfree(key);
3104 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003105}
3106
Johan Hedberg35f74982014-02-18 17:14:32 +02003107void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003108{
3109 struct smp_ltk *k, *tmp;
3110
3111 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3112 list_del(&k->list);
3113 kfree(k);
3114 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003115}
3116
Johan Hedberg970c4e42014-02-18 10:19:33 +02003117void hci_smp_irks_clear(struct hci_dev *hdev)
3118{
3119 struct smp_irk *k, *tmp;
3120
3121 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3122 list_del(&k->list);
3123 kfree(k);
3124 }
3125}
3126
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003127struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3128{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003129 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003130
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003131 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003132 if (bacmp(bdaddr, &k->bdaddr) == 0)
3133 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003134
3135 return NULL;
3136}
3137
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303138static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003139 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003140{
3141 /* Legacy key */
3142 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303143 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003144
3145 /* Debug keys are insecure so don't store them persistently */
3146 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303147 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003148
3149 /* Changed combination key and there's no previous one */
3150 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303151 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003152
3153 /* Security mode 3 case */
3154 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303155 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003156
3157 /* Neither local nor remote side had no-bonding as requirement */
3158 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303159 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003160
3161 /* Local side had dedicated bonding as requirement */
3162 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303163 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003164
3165 /* Remote side had dedicated bonding as requirement */
3166 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303167 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003168
3169 /* If none of the above criteria match, then don't store the key
3170 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05303171 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003172}
3173
Johan Hedberge804d252014-07-16 11:42:28 +03003174static u8 ltk_role(u8 type)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003175{
Johan Hedberge804d252014-07-16 11:42:28 +03003176 if (type == SMP_LTK)
3177 return HCI_ROLE_MASTER;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003178
Johan Hedberge804d252014-07-16 11:42:28 +03003179 return HCI_ROLE_SLAVE;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003180}
3181
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003182struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberge804d252014-07-16 11:42:28 +03003183 u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003184{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003185 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003186
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003187 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003188 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003189 continue;
3190
Johan Hedberge804d252014-07-16 11:42:28 +03003191 if (ltk_role(k->type) != role)
Johan Hedberg98a0b842014-01-30 19:40:00 -08003192 continue;
3193
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003194 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003195 }
3196
3197 return NULL;
3198}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003199
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003200struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberge804d252014-07-16 11:42:28 +03003201 u8 addr_type, u8 role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003202{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003203 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003204
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003205 list_for_each_entry(k, &hdev->long_term_keys, list)
3206 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08003207 bacmp(bdaddr, &k->bdaddr) == 0 &&
Johan Hedberge804d252014-07-16 11:42:28 +03003208 ltk_role(k->type) == role)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003209 return k;
3210
3211 return NULL;
3212}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003213
/* Resolve a Resolvable Private Address to a stored IRK.
 *
 * Two passes over the IRK list: the first is a cheap comparison
 * against the last RPA each IRK was seen to resolve (cached in
 * irk->rpa); only if that misses is the cryptographic resolution via
 * smp_irk_matches() attempted, and a successful match refreshes the
 * cache. Returns the owning IRK, or NULL if no key resolves @rpa.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	/* Fast path: RPA already resolved for one of the keys */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	/* Slow path: try to cryptographically resolve the RPA */
	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			/* Cache the resolved RPA for the fast path above */
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
3232
3233struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3234 u8 addr_type)
3235{
3236 struct smp_irk *irk;
3237
Johan Hedberg6cfc9982014-02-18 21:41:35 +02003238 /* Identity Address must be public or static random */
3239 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3240 return NULL;
3241
Johan Hedberg970c4e42014-02-18 10:19:33 +02003242 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3243 if (addr_type == irk->addr_type &&
3244 bacmp(bdaddr, &irk->bdaddr) == 0)
3245 return irk;
3246 }
3247
3248 return NULL;
3249}
3250
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (e.g. key delivered without a tracked connection).
 * @val must point at HCI_LINK_KEY_SIZE bytes. If @persistent is
 * non-NULL it is set to whether the key should be saved by user space
 * (see hci_persistent_key()). Returns the stored entry, or NULL on
 * allocation failure. Caller is expected to hold the relevant hdev
 * locking used elsewhere for key-list access -- TODO confirm at call
 * sites.
 */
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}
3297
Johan Hedbergca9142b2014-02-19 14:57:44 +02003298struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003299 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003300 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003301{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003302 struct smp_ltk *key, *old_key;
Johan Hedberge804d252014-07-16 11:42:28 +03003303 u8 role = ltk_role(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003304
Johan Hedberge804d252014-07-16 11:42:28 +03003305 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003306 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003307 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003308 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003309 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003310 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003311 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003312 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003313 }
3314
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003315 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003316 key->bdaddr_type = addr_type;
3317 memcpy(key->val, tk, sizeof(key->val));
3318 key->authenticated = authenticated;
3319 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003320 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003321 key->enc_size = enc_size;
3322 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003323
Johan Hedbergca9142b2014-02-19 14:57:44 +02003324 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003325}
3326
Johan Hedbergca9142b2014-02-19 14:57:44 +02003327struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3328 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003329{
3330 struct smp_irk *irk;
3331
3332 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3333 if (!irk) {
3334 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3335 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003336 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003337
3338 bacpy(&irk->bdaddr, bdaddr);
3339 irk->addr_type = addr_type;
3340
3341 list_add(&irk->list, &hdev->identity_resolving_keys);
3342 }
3343
3344 memcpy(irk->val, val, 16);
3345 bacpy(&irk->rpa, rpa);
3346
Johan Hedbergca9142b2014-02-19 14:57:44 +02003347 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003348}
3349
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003350int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3351{
3352 struct link_key *key;
3353
3354 key = hci_find_link_key(hdev, bdaddr);
3355 if (!key)
3356 return -ENOENT;
3357
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003358 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003359
3360 list_del(&key->list);
3361 kfree(key);
3362
3363 return 0;
3364}
3365
Johan Hedberge0b2b272014-02-18 17:14:31 +02003366int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003367{
3368 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003369 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003370
3371 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003372 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003373 continue;
3374
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003375 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003376
3377 list_del(&k->list);
3378 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003379 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003380 }
3381
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003382 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003383}
3384
Johan Hedberga7ec7332014-02-18 17:14:35 +02003385void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3386{
3387 struct smp_irk *k, *tmp;
3388
Johan Hedberg668b7b12014-02-21 16:03:31 +02003389 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003390 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3391 continue;
3392
3393 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3394
3395 list_del(&k->list);
3396 kfree(k);
3397 }
3398}
3399
Ville Tervo6bd32322011-02-16 16:32:41 +02003400/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003401static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003402{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003403 struct hci_dev *hdev = container_of(work, struct hci_dev,
3404 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003405
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003406 if (hdev->sent_cmd) {
3407 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3408 u16 opcode = __le16_to_cpu(sent->opcode);
3409
3410 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3411 } else {
3412 BT_ERR("%s command tx timeout", hdev->name);
3413 }
3414
Ville Tervo6bd32322011-02-16 16:32:41 +02003415 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003416 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003417}
3418
Szymon Janc2763eda2011-03-22 13:12:22 +01003419struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003420 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003421{
3422 struct oob_data *data;
3423
3424 list_for_each_entry(data, &hdev->remote_oob_data, list)
3425 if (bacmp(bdaddr, &data->bdaddr) == 0)
3426 return data;
3427
3428 return NULL;
3429}
3430
3431int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3432{
3433 struct oob_data *data;
3434
3435 data = hci_find_remote_oob_data(hdev, bdaddr);
3436 if (!data)
3437 return -ENOENT;
3438
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003439 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003440
3441 list_del(&data->list);
3442 kfree(data);
3443
3444 return 0;
3445}
3446
Johan Hedberg35f74982014-02-18 17:14:32 +02003447void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003448{
3449 struct oob_data *data, *n;
3450
3451 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3452 list_del(&data->list);
3453 kfree(data);
3454 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003455}
3456
/* Store remote OOB pairing data containing only the P-192 values.
 *
 * Reuses an existing entry for @bdaddr when present, otherwise
 * allocates one. Since only 192-bit data is provided, any previously
 * stored 256-bit hash/randomizer for this address is explicitly
 * zeroed so stale P-256 values cannot be used. @hash and @randomizer
 * must each point at sizeof(data->hash192) bytes (16 -- TODO confirm
 * against struct oob_data). Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* Invalidate any previously stored P-256 values */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3482
3483int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3484 u8 *hash192, u8 *randomizer192,
3485 u8 *hash256, u8 *randomizer256)
3486{
3487 struct oob_data *data;
3488
3489 data = hci_find_remote_oob_data(hdev, bdaddr);
3490 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003491 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003492 if (!data)
3493 return -ENOMEM;
3494
3495 bacpy(&data->bdaddr, bdaddr);
3496 list_add(&data->list, &hdev->remote_oob_data);
3497 }
3498
3499 memcpy(data->hash192, hash192, sizeof(data->hash192));
3500 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3501
3502 memcpy(data->hash256, hash256, sizeof(data->hash256));
3503 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3504
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003505 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003506
3507 return 0;
3508}
3509
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003510struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003511 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003512{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003513 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003514
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003515 list_for_each_entry(b, bdaddr_list, list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003516 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003517 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003518 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003519
3520 return NULL;
3521}
3522
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003523void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003524{
3525 struct list_head *p, *n;
3526
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003527 list_for_each_safe(p, n, bdaddr_list) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003528 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003529
3530 list_del(p);
3531 kfree(b);
3532 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003533}
3534
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003535int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003536{
3537 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003538
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003539 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003540 return -EBADF;
3541
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003542 if (hci_bdaddr_list_lookup(list, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003543 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003544
Johan Hedberg27f70f32014-07-21 10:50:06 +03003545 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003546 if (!entry)
3547 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003548
3549 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003550 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003551
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003552 list_add(&entry->list, list);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003553
3554 return 0;
3555}
3556
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003557int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003558{
3559 struct bdaddr_list *entry;
3560
Johan Hedberg35f74982014-02-18 17:14:32 +02003561 if (!bacmp(bdaddr, BDADDR_ANY)) {
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003562 hci_bdaddr_list_clear(list);
Johan Hedberg35f74982014-02-18 17:14:32 +02003563 return 0;
3564 }
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003565
Johan Hedbergdcc36c12014-07-09 12:59:13 +03003566 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003567 if (!entry)
3568 return -ENOENT;
3569
3570 list_del(&entry->list);
3571 kfree(entry);
3572
3573 return 0;
3574}
3575
Andre Guedes15819a72014-02-03 13:56:18 -03003576/* This function requires the caller holds hdev->lock */
3577struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3578 bdaddr_t *addr, u8 addr_type)
3579{
3580 struct hci_conn_params *params;
3581
Johan Hedberg738f6182014-07-03 19:33:51 +03003582 /* The conn params list only contains identity addresses */
3583 if (!hci_is_identity_address(addr, addr_type))
3584 return NULL;
3585
Andre Guedes15819a72014-02-03 13:56:18 -03003586 list_for_each_entry(params, &hdev->le_conn_params, list) {
3587 if (bacmp(&params->addr, addr) == 0 &&
3588 params->addr_type == addr_type) {
3589 return params;
3590 }
3591 }
3592
3593 return NULL;
3594}
3595
Andre Guedescef952c2014-02-26 20:21:49 -03003596static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3597{
3598 struct hci_conn *conn;
3599
3600 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3601 if (!conn)
3602 return false;
3603
3604 if (conn->dst_type != type)
3605 return false;
3606
3607 if (conn->state != BT_CONNECTED)
3608 return false;
3609
3610 return true;
3611}
3612
Andre Guedes15819a72014-02-03 13:56:18 -03003613/* This function requires the caller holds hdev->lock */
Johan Hedberg501f8822014-07-04 12:37:26 +03003614struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3615 bdaddr_t *addr, u8 addr_type)
Andre Guedes15819a72014-02-03 13:56:18 -03003616{
Johan Hedberg912b42e2014-07-03 19:33:49 +03003617 struct hci_conn_params *param;
Andre Guedes15819a72014-02-03 13:56:18 -03003618
Johan Hedberg738f6182014-07-03 19:33:51 +03003619 /* The list only contains identity addresses */
3620 if (!hci_is_identity_address(addr, addr_type))
3621 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003622
Johan Hedberg501f8822014-07-04 12:37:26 +03003623 list_for_each_entry(param, list, action) {
Johan Hedberg912b42e2014-07-03 19:33:49 +03003624 if (bacmp(&param->addr, addr) == 0 &&
3625 param->addr_type == addr_type)
3626 return param;
Marcel Holtmann4b109662014-06-29 13:41:49 +02003627 }
3628
3629 return NULL;
Andre Guedes15819a72014-02-03 13:56:18 -03003630}
3631
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* Only identity addresses may have connection parameters */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	/* Adding is idempotent: return the existing entry if present */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		return params;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return NULL;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);
	/* Not on any pending-action list yet; initialize so that
	 * list_del()-style operations on ->action are always safe.
	 */
	INIT_LIST_HEAD(&params->action);

	/* Seed the per-connection parameters from the controller-wide
	 * defaults; auto connect starts out disabled.
	 */
	params->conn_min_interval = hdev->le_conn_min_interval;
	params->conn_max_interval = hdev->le_conn_max_interval;
	params->conn_latency = hdev->le_conn_latency;
	params->supervision_timeout = hdev->le_supv_timeout;
	params->auto_connect = HCI_AUTO_CONN_DISABLED;

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

	return params;
}
3667
/* This function requires the caller holds hdev->lock */
int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect)
{
	struct hci_conn_params *params;

	/* Create (or fetch) the parameter entry for this identity */
	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do if the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Unlink from whichever pending-action list the entry was on;
	 * list_del_init keeps ->action safe for re-adding below.
	 */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* No pending action; just refresh the passive scan state */
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_REPORT:
		list_add(&params->action, &hdev->pend_le_reports);
		hci_update_background_scan(hdev);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection if one isn't already
		 * established to this address.
		 */
		if (!is_connected(hdev, addr, addr_type)) {
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
		}
		break;
	}

	params->auto_connect = auto_connect;

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       auto_connect);

	return 0;
}
3708
/* Release a connection-parameter entry: drop the reference (and the
 * hold) on any connection it is tracking, unlink it from both the
 * pending-action list and the main le_conn_params list, and free it.
 */
static void hci_conn_params_free(struct hci_conn_params *params)
{
	if (params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
	}

	list_del(&params->action);
	list_del(&params->list);
	kfree(params);
}
3720
Andre Guedes15819a72014-02-03 13:56:18 -03003721/* This function requires the caller holds hdev->lock */
3722void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3723{
3724 struct hci_conn_params *params;
3725
3726 params = hci_conn_params_lookup(hdev, addr, addr_type);
3727 if (!params)
3728 return;
3729
Johan Hedbergf6c63242014-08-15 21:06:59 +03003730 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003731
Johan Hedberg95305ba2014-07-04 12:37:21 +03003732 hci_update_background_scan(hdev);
3733
Andre Guedes15819a72014-02-03 13:56:18 -03003734 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3735}
3736
3737/* This function requires the caller holds hdev->lock */
Johan Hedberg55af49a2014-07-02 17:37:26 +03003738void hci_conn_params_clear_disabled(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003739{
3740 struct hci_conn_params *params, *tmp;
3741
3742 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
Johan Hedberg55af49a2014-07-02 17:37:26 +03003743 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3744 continue;
Andre Guedes15819a72014-02-03 13:56:18 -03003745 list_del(&params->list);
3746 kfree(params);
3747 }
3748
Johan Hedberg55af49a2014-07-02 17:37:26 +03003749 BT_DBG("All LE disabled connection parameters were removed");
3750}
3751
3752/* This function requires the caller holds hdev->lock */
Johan Hedberg373110c2014-07-02 17:37:25 +03003753void hci_conn_params_clear_all(struct hci_dev *hdev)
Andre Guedes15819a72014-02-03 13:56:18 -03003754{
3755 struct hci_conn_params *params, *tmp;
3756
Johan Hedbergf6c63242014-08-15 21:06:59 +03003757 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3758 hci_conn_params_free(params);
Andre Guedes15819a72014-02-03 13:56:18 -03003759
Johan Hedberga2f41a82014-07-04 12:37:19 +03003760 hci_update_background_scan(hdev);
Marcel Holtmann1089b672014-06-29 13:41:50 +02003761
Andre Guedes15819a72014-02-03 13:56:18 -03003762 BT_DBG("All LE connection parameters were removed");
3763}
3764
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003765static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003766{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003767 if (status) {
3768 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003769
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003770 hci_dev_lock(hdev);
3771 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3772 hci_dev_unlock(hdev);
3773 return;
3774 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003775}
3776
/* Completion callback for the LE-scan-disable request issued by
 * le_scan_disable_work(). Depending on the active discovery type,
 * either mark discovery stopped (LE-only) or continue an interleaved
 * discovery by kicking off a BR/EDR inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	/* NOTE(review): no default case -- other discovery types appear
	 * to require no action once LE scanning has been disabled.
	 */
	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		/* LE-only discovery is done once scanning stops */
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		/* LE phase finished; start the BR/EDR inquiry phase */
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the new inquiry */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3819
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003820static void le_scan_disable_work(struct work_struct *work)
3821{
3822 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003823 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003824 struct hci_request req;
3825 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003826
3827 BT_DBG("%s", hdev->name);
3828
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003829 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003830
Andre Guedesb1efcc22014-02-26 20:21:40 -03003831 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003832
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003833 err = hci_req_run(&req, le_scan_disable_work_complete);
3834 if (err)
3835 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003836}
3837
/* Queue an HCI_OP_LE_SET_RANDOM_ADDR command on @req to program @rpa
 * as the controller's random address, unless doing so right now would
 * race with advertising or an outgoing LE connection. In the deferred
 * case the RPA is flagged expired so the next cycle regenerates and
 * programs it.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		/* Force regeneration on the next update attempt */
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3861
/* Choose the own-address type for an upcoming LE operation and, when a
 * random address is required, queue the HCI command that programs it.
 *
 * @req:             request to append address-update commands to
 * @require_privacy: when true and the privacy feature is off, fall back
 *                   to a freshly generated unresolvable private address
 * @own_addr_type:   out parameter, set to one of the ADDR_LE_DEV_*
 *                   constants for use in the subsequent LE command
 *
 * Returns 0 on success or a negative error if a new RPA could not be
 * generated.  Priority order: RPA (privacy enabled), unresolvable
 * private address (privacy required), static random address, public
 * address.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		/* Still-valid RPA already programmed: nothing to do.
		 * Note test_and_clear_bit() also consumes the expired
		 * flag when it was set.
		 */
		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Schedule expiry of the freshly generated RPA */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3931
Johan Hedberga1f4c312014-02-27 14:05:41 +02003932/* Copy the Identity Address of the controller.
3933 *
3934 * If the controller has a public BD_ADDR, then by default use that one.
3935 * If this is a LE only controller without a public address, default to
3936 * the static random address.
3937 *
3938 * For debugging purposes it is possible to force controllers with a
3939 * public address to use the static random address instead.
3940 */
3941void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3942 u8 *bdaddr_type)
3943{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003944 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003945 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3946 bacpy(bdaddr, &hdev->static_addr);
3947 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3948 } else {
3949 bacpy(bdaddr, &hdev->bdaddr);
3950 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3951 }
3952}
3953
/* Alloc HCI device.
 *
 * Allocates and initializes a zeroed struct hci_dev with protocol
 * defaults, empty lists, work items and queues.  Returns NULL on
 * allocation failure.  The caller registers it with
 * hci_register_dev() and releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities and controller defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->manufacturer = 0xffff;	/* Default to internal use */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff-mode interval bounds (baseband slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE advertising, scanning and connection parameter defaults
	 * (values in the units used by the corresponding HCI commands)
	 */
	hdev->le_adv_channel_map = 0x07;
	hdev->le_adv_min_interval = 0x0800;
	hdev->le_adv_max_interval = 0x0800;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;
	hdev->le_conn_latency = 0x0000;
	hdev->le_supv_timeout = 0x002a;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	/* Per-device lists (keys, filters, pending operations) */
	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->whitelist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->pend_le_reports);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Deferred processing: RX, command TX, data TX and power-on */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
4030
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop the device-core reference; the actual memory is freed
	 * by the device release callback, not here.
	 */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
4038
/* Register HCI device.
 *
 * Allocates an index, creates the work queues, sysfs/debugfs entries
 * and rfkill hook, adds the device to the global list and schedules
 * its initial power-on.  Returns the assigned index (>= 0) or a
 * negative error; on error all partially acquired resources are
 * released again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* The transport driver must supply these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->send)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded, high-priority queues: one for the normal
	 * TX/RX work and one dedicated to blocking requests.
	 */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is tolerated and
	 * simply leaves the device without an rfkill handle.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Devices that are marked for raw-only usage are unconfigured
	 * and should not be included in normal operation.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4142
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, closes it, tears down management state, rfkill, sysfs/debugfs
 * and work queues, clears all per-device lists and finally drops the
 * registration reference and releases the index.  The teardown order
 * matters; do not reorder these steps.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark early so concurrent paths can detect the teardown */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device completed setup
	 * and is not in the middle of init/config.
	 */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	smp_unregister(hdev);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Flush all remembered keys, filters and connection parameters */
	hci_dev_lock(hdev);
	hci_bdaddr_list_clear(&hdev->blacklist);
	hci_bdaddr_list_clear(&hdev->whitelist);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_bdaddr_list_clear(&hdev->le_white_list);
	hci_conn_params_clear_all(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4210
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4218
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; always succeeds */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4226
Marcel Holtmann75e05692014-11-02 08:15:38 +01004227/* Reset HCI device */
4228int hci_reset_dev(struct hci_dev *hdev)
4229{
4230 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4231 struct sk_buff *skb;
4232
4233 skb = bt_skb_alloc(3, GFP_ATOMIC);
4234 if (!skb)
4235 return -ENOMEM;
4236
4237 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4238 memcpy(skb_put(skb, 3), hw_err, 3);
4239
4240 /* Send Hardware Error to upper stack */
4241 return hci_recv_frame(hdev, skb);
4242}
4243EXPORT_SYMBOL(hci_reset_dev);
4244
Marcel Holtmann76bca882009-11-18 00:40:39 +01004245/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004246int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004247{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004248 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004249 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004250 kfree_skb(skb);
4251 return -ENXIO;
4252 }
4253
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004254 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004255 bt_cb(skb)->incoming = 1;
4256
4257 /* Time stamp */
4258 __net_timestamp(skb);
4259
Marcel Holtmann76bca882009-11-18 00:40:39 +01004260 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004261 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004262
Marcel Holtmann76bca882009-11-18 00:40:39 +01004263 return 0;
4264}
4265EXPORT_SYMBOL(hci_recv_frame);
4266
/* Incrementally reassemble one HCI frame from a driver byte stream.
 *
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  next chunk of raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Consumes bytes from @data into the slot's partially built skb.  Once
 * a complete frame has been assembled it is handed to hci_recv_frame()
 * and the slot is cleared.  Returns the number of unconsumed bytes
 * (>= 0), or -EILSEQ for an invalid type/index, or -ENOMEM on
 * allocation failure or an oversized payload length.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Valid types are HCI_ACLDATA_PKT..HCI_EVENT_PKT (contiguous) */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate a buffer sized for the
		 * worst case of this packet type and expect its header
		 * first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits in the allocated buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4374
Suraj Sumangala99811512010-07-14 13:02:19 +05304375#define STREAM_REASSEMBLY 0
4376
4377int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4378{
4379 int type;
4380 int rem = 0;
4381
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004382 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304383 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4384
4385 if (!skb) {
4386 struct { char type; } *pkt;
4387
4388 /* Start of the frame */
4389 pkt = data;
4390 type = pkt->type;
4391
4392 data++;
4393 count--;
4394 } else
4395 type = bt_cb(skb)->pkt_type;
4396
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004397 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004398 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304399 if (rem < 0)
4400 return rem;
4401
4402 data += (count - rem);
4403 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004404 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304405
4406 return rem;
4407}
4408EXPORT_SYMBOL(hci_recv_stream_fragment);
4409
/* ---- Interface to upper protocols ---- */

/* Add @cb to the global HCI callback list under the list lock.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
4423
/* Remove @cb from the global HCI callback list under the list lock.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4435
Marcel Holtmann51086992013-10-10 14:54:19 -07004436static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004437{
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004438 int err;
4439
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004440 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004441
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004442 /* Time stamp */
4443 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004444
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004445 /* Send copy to monitor */
4446 hci_send_to_monitor(hdev, skb);
4447
4448 if (atomic_read(&hdev->promisc)) {
4449 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004450 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451 }
4452
4453 /* Get rid of skb owner, prior to sending to the driver. */
4454 skb_orphan(skb);
4455
Marcel Holtmanncdc52fa2014-07-06 15:36:15 +02004456 err = hdev->send(hdev, skb);
4457 if (err < 0) {
4458 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4459 kfree_skb(skb);
4460 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461}
4462
Johan Hedberg3119ae92013-03-05 20:37:44 +02004463void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4464{
4465 skb_queue_head_init(&req->cmd_q);
4466 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004467 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004468}
4469
4470int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4471{
4472 struct hci_dev *hdev = req->hdev;
4473 struct sk_buff *skb;
4474 unsigned long flags;
4475
4476 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4477
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004478 /* If an error occurred during request building, remove all HCI
Andre Guedes5d73e032013-03-08 11:20:16 -03004479 * commands queued on the HCI request queue.
4480 */
4481 if (req->err) {
4482 skb_queue_purge(&req->cmd_q);
4483 return req->err;
4484 }
4485
Johan Hedberg3119ae92013-03-05 20:37:44 +02004486 /* Do not allow empty requests */
4487 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004488 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004489
4490 skb = skb_peek_tail(&req->cmd_q);
4491 bt_cb(skb)->req.complete = complete;
4492
4493 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4494 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4495 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4496
4497 queue_work(hdev->workqueue, &hdev->cmd_work);
4498
4499 return 0;
4500}
4501
Marcel Holtmann899de762014-07-11 05:51:58 +02004502bool hci_req_pending(struct hci_dev *hdev)
4503{
4504 return (hdev->req_status == HCI_REQ_PEND);
4505}
4506
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004507static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004508 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509{
4510 int len = HCI_COMMAND_HDR_SIZE + plen;
4511 struct hci_command_hdr *hdr;
4512 struct sk_buff *skb;
4513
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004515 if (!skb)
4516 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517
4518 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004519 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 hdr->plen = plen;
4521
4522 if (plen)
4523 memcpy(skb_put(skb, plen), param, plen);
4524
4525 BT_DBG("skb len %d", skb->len);
4526
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004527 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmann43e73e42014-09-14 23:06:28 +02004528 bt_cb(skb)->opcode = opcode;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004529
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004530 return skb;
4531}
4532
4533/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004534int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4535 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004536{
4537 struct sk_buff *skb;
4538
4539 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4540
4541 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4542 if (!skb) {
4543 BT_ERR("%s no memory for command", hdev->name);
4544 return -ENOMEM;
4545 }
4546
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004547 /* Stand-alone HCI commands must be flagged as
Johan Hedberg11714b32013-03-05 20:37:47 +02004548 * single-command requests.
4549 */
4550 bt_cb(skb)->req.start = true;
4551
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004553 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004554
4555 return 0;
4556}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004557
Johan Hedberg71c76a12013-03-05 20:37:46 +02004558/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004559void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4560 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004561{
4562 struct hci_dev *hdev = req->hdev;
4563 struct sk_buff *skb;
4564
4565 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4566
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004567 /* If an error occurred during request building, there is no point in
Andre Guedes34739c12013-03-08 11:20:18 -03004568 * queueing the HCI command. We can simply return.
4569 */
4570 if (req->err)
4571 return;
4572
Johan Hedberg71c76a12013-03-05 20:37:46 +02004573 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4574 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004575 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4576 hdev->name, opcode);
4577 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004578 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004579 }
4580
4581 if (skb_queue_empty(&req->cmd_q))
4582 bt_cb(skb)->req.start = true;
4583
Johan Hedberg02350a72013-04-03 21:50:29 +03004584 bt_cb(skb)->req.event = event;
4585
Johan Hedberg71c76a12013-03-05 20:37:46 +02004586 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004587}
4588
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004589void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4590 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004591{
4592 hci_req_add_ev(req, opcode, plen, param, 0);
4593}
4594
Linus Torvalds1da177e2005-04-16 15:20:36 -07004595/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004596void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004597{
4598 struct hci_command_hdr *hdr;
4599
4600 if (!hdev->sent_cmd)
4601 return NULL;
4602
4603 hdr = (void *) hdev->sent_cmd->data;
4604
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004605 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004606 return NULL;
4607
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004608 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004609
4610 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4611}
4612
4613/* Send ACL data */
4614static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4615{
4616 struct hci_acl_hdr *hdr;
4617 int len = skb->len;
4618
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004619 skb_push(skb, HCI_ACL_HDR_SIZE);
4620 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004621 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004622 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4623 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004624}
4625
/* Prepend ACL headers to an outgoing (possibly fragmented) skb and move
 * it onto the given channel queue.  The head skb keeps the caller's
 * ACL_START flags; any frag_list fragments are queued individually with
 * ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments hang off
	 * frag_list and are handled below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR controllers address data by connection handle, AMP
	 * controllers by channel handle.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically. We need to use spin_lock_bh
		 * here because of 6LoWPAN links, as there this function is
		 * called from softirq and using normal spin lock could cause
		 * deadlocks.
		 */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
4687
4688void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4689{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004690 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004691
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004692 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004693
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004694 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004696 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004698
4699/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004700void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004701{
4702 struct hci_dev *hdev = conn->hdev;
4703 struct hci_sco_hdr hdr;
4704
4705 BT_DBG("%s len %d", hdev->name, skb->len);
4706
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004707 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004708 hdr.dlen = skb->len;
4709
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004710 skb_push(skb, HCI_SCO_HDR_SIZE);
4711 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004712 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004713
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004714 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004715
Linus Torvalds1da177e2005-04-16 15:20:36 -07004716 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004717 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004718}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004719
4720/* ---- HCI TX task (outgoing data) ---- */
4721
4722/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004723static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4724 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004725{
4726 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004727 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004728 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004729
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004730 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004731 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004732
4733 rcu_read_lock();
4734
4735 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004736 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004738
4739 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4740 continue;
4741
Linus Torvalds1da177e2005-04-16 15:20:36 -07004742 num++;
4743
4744 if (c->sent < min) {
4745 min = c->sent;
4746 conn = c;
4747 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004748
4749 if (hci_conn_num(hdev, type) == num)
4750 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004751 }
4752
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004753 rcu_read_unlock();
4754
Linus Torvalds1da177e2005-04-16 15:20:36 -07004755 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004756 int cnt, q;
4757
4758 switch (conn->type) {
4759 case ACL_LINK:
4760 cnt = hdev->acl_cnt;
4761 break;
4762 case SCO_LINK:
4763 case ESCO_LINK:
4764 cnt = hdev->sco_cnt;
4765 break;
4766 case LE_LINK:
4767 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4768 break;
4769 default:
4770 cnt = 0;
4771 BT_ERR("Unknown link type");
4772 }
4773
4774 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 *quote = q ? q : 1;
4776 } else
4777 *quote = 0;
4778
4779 BT_DBG("conn %p quote %d", conn, *quote);
4780 return conn;
4781}
4782
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004783static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004784{
4785 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004786 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787
Ville Tervobae1f5d92011-02-10 22:38:53 -03004788 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004789
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004790 rcu_read_lock();
4791
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004793 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004794 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004795 BT_ERR("%s killing stalled connection %pMR",
4796 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004797 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004798 }
4799 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004800
4801 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004802}
4803
/* Pick the best channel of the given link type to transmit on next.
 * Only the highest head-of-queue skb priority present across all
 * channels is considered; among channels at that priority the one whose
 * connection has the least outstanding data wins.  *quote receives the
 * winning channel's fair share of the controller's free buffers
 * (at least 1).  Returns NULL when nothing is ready to send.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters here */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a higher priority: restart the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the buffer pool matching the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among the num channels at the current priority */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
4885
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004886static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4887{
4888 struct hci_conn_hash *h = &hdev->conn_hash;
4889 struct hci_conn *conn;
4890 int num = 0;
4891
4892 BT_DBG("%s", hdev->name);
4893
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004894 rcu_read_lock();
4895
4896 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004897 struct hci_chan *chan;
4898
4899 if (conn->type != type)
4900 continue;
4901
4902 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4903 continue;
4904
4905 num++;
4906
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004907 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004908 struct sk_buff *skb;
4909
4910 if (chan->sent) {
4911 chan->sent = 0;
4912 continue;
4913 }
4914
4915 if (skb_queue_empty(&chan->data_q))
4916 continue;
4917
4918 skb = skb_peek(&chan->data_q);
4919 if (skb->priority >= HCI_PRIO_MAX - 1)
4920 continue;
4921
4922 skb->priority = HCI_PRIO_MAX - 1;
4923
4924 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004925 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004926 }
4927
4928 if (hci_conn_num(hdev, type) == num)
4929 break;
4930 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004931
4932 rcu_read_unlock();
4933
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004934}
4935
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004936static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4937{
4938 /* Calculate count of blocks used by this packet */
4939 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4940}
4941
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004942static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943{
Marcel Holtmann4a964402014-07-02 19:10:33 +02004944 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 /* ACL tx timeout must be longer than maximum
4946 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004947 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004948 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004949 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004950 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004951}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004952
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004953static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004954{
4955 unsigned int cnt = hdev->acl_cnt;
4956 struct hci_chan *chan;
4957 struct sk_buff *skb;
4958 int quote;
4959
4960 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004961
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004962 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004963 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004964 u32 priority = (skb_peek(&chan->data_q))->priority;
4965 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004966 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004967 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004968
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004969 /* Stop if priority has changed */
4970 if (skb->priority < priority)
4971 break;
4972
4973 skb = skb_dequeue(&chan->data_q);
4974
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004975 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004976 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004977
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004978 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004979 hdev->acl_last_tx = jiffies;
4980
4981 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004982 chan->sent++;
4983 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 }
4985 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004986
4987 if (cnt != hdev->acl_cnt)
4988 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989}
4990
/* Transmit queued ACL data on block-based flow control controllers.
 * Credits are counted in data blocks rather than whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry their data over AMP_LINK connections */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): when the packet needs more blocks
			 * than remain, it has already been dequeued and is
			 * neither freed nor requeued here — confirm whether
			 * this path can trigger in practice.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			/* One packet may consume several block credits */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* If any blocks were consumed, rebalance queued priorities */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
5044
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005045static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005046{
5047 BT_DBG("%s", hdev->name);
5048
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03005049 /* No ACL link over BR/EDR controller */
5050 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5051 return;
5052
5053 /* No AMP link over AMP controller */
5054 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02005055 return;
5056
5057 switch (hdev->flow_ctl_mode) {
5058 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5059 hci_sched_acl_pkt(hdev);
5060 break;
5061
5062 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5063 hci_sched_acl_blk(hdev);
5064 break;
5065 }
5066}
5067
Linus Torvalds1da177e2005-04-16 15:20:36 -07005068/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005069static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005070{
5071 struct hci_conn *conn;
5072 struct sk_buff *skb;
5073 int quote;
5074
5075 BT_DBG("%s", hdev->name);
5076
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005077 if (!hci_conn_num(hdev, SCO_LINK))
5078 return;
5079
Linus Torvalds1da177e2005-04-16 15:20:36 -07005080 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5081 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5082 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005083 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005084
5085 conn->sent++;
5086 if (conn->sent == ~0)
5087 conn->sent = 0;
5088 }
5089 }
5090}
5091
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005092static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005093{
5094 struct hci_conn *conn;
5095 struct sk_buff *skb;
5096 int quote;
5097
5098 BT_DBG("%s", hdev->name);
5099
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005100 if (!hci_conn_num(hdev, ESCO_LINK))
5101 return;
5102
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03005103 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5104 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005105 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5106 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005107 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02005108
5109 conn->sent++;
5110 if (conn->sent == ~0)
5111 conn->sent = 0;
5112 }
5113 }
5114}
5115
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005116static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005117{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005118 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005119 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005120 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005121
5122 BT_DBG("%s", hdev->name);
5123
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03005124 if (!hci_conn_num(hdev, LE_LINK))
5125 return;
5126
Marcel Holtmann4a964402014-07-02 19:10:33 +02005127 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005128 /* LE tx timeout must be longer than maximum
5129 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03005130 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005131 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03005132 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005133 }
5134
5135 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005136 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005137 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005138 u32 priority = (skb_peek(&chan->data_q))->priority;
5139 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005140 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005141 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005142
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02005143 /* Stop if priority has changed */
5144 if (skb->priority < priority)
5145 break;
5146
5147 skb = skb_dequeue(&chan->data_q);
5148
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005149 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005150 hdev->le_last_tx = jiffies;
5151
5152 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005153 chan->sent++;
5154 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005155 }
5156 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02005157
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005158 if (hdev->le_pkts)
5159 hdev->le_cnt = cnt;
5160 else
5161 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02005162
5163 if (cnt != tmp)
5164 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03005165}
5166
/* TX worker: runs all per-link-type schedulers and then flushes any raw
 * (unknown type) packets queued by user space.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* With an exclusive user channel the socket drives the device
	 * directly, so skip kernel-side scheduling.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
5187
Lucas De Marchi25985ed2011-03-30 22:57:33 -03005188/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07005189
5190/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005191static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005192{
5193 struct hci_acl_hdr *hdr = (void *) skb->data;
5194 struct hci_conn *conn;
5195 __u16 handle, flags;
5196
5197 skb_pull(skb, HCI_ACL_HDR_SIZE);
5198
5199 handle = __le16_to_cpu(hdr->handle);
5200 flags = hci_flags(handle);
5201 handle = hci_handle(handle);
5202
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005203 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005204 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005205
5206 hdev->stat.acl_rx++;
5207
5208 hci_dev_lock(hdev);
5209 conn = hci_conn_hash_lookup_handle(hdev, handle);
5210 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005211
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005213 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005214
Linus Torvalds1da177e2005-04-16 15:20:36 -07005215 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005216 l2cap_recv_acldata(conn, skb, flags);
5217 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005218 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005219 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005220 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005221 }
5222
5223 kfree_skb(skb);
5224}
5225
5226/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005227static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005228{
5229 struct hci_sco_hdr *hdr = (void *) skb->data;
5230 struct hci_conn *conn;
5231 __u16 handle;
5232
5233 skb_pull(skb, HCI_SCO_HDR_SIZE);
5234
5235 handle = __le16_to_cpu(hdr->handle);
5236
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005237 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005238
5239 hdev->stat.sco_rx++;
5240
5241 hci_dev_lock(hdev);
5242 conn = hci_conn_hash_lookup_handle(hdev, handle);
5243 hci_dev_unlock(hdev);
5244
5245 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005246 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005247 sco_recv_scodata(conn, skb);
5248 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005249 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005250 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005251 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005252 }
5253
5254 kfree_skb(skb);
5255}
5256
Johan Hedberg9238f362013-03-05 20:37:48 +02005257static bool hci_req_is_complete(struct hci_dev *hdev)
5258{
5259 struct sk_buff *skb;
5260
5261 skb = skb_peek(&hdev->cmd_q);
5262 if (!skb)
5263 return true;
5264
5265 return bt_cb(skb)->req.start;
5266}
5267
Johan Hedberg42c6b122013-03-05 20:37:49 +02005268static void hci_resend_last(struct hci_dev *hdev)
5269{
5270 struct hci_command_hdr *sent;
5271 struct sk_buff *skb;
5272 u16 opcode;
5273
5274 if (!hdev->sent_cmd)
5275 return;
5276
5277 sent = (void *) hdev->sent_cmd->data;
5278 opcode = __le16_to_cpu(sent->opcode);
5279 if (opcode == HCI_OP_RESET)
5280 return;
5281
5282 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5283 if (!skb)
5284 return;
5285
5286 skb_queue_head(&hdev->cmd_q, skb);
5287 queue_work(hdev->workqueue, &hdev->cmd_work);
5288}
5289
/* Called on a command status/complete event to decide whether the HCI
 * request the command belonged to is finished, and if so run its
 * completion callback and flush the request's remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* The start of the next request marks the flush boundary;
		 * put it back and stop.
		 */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
5355
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005356static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005357{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005358 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005359 struct sk_buff *skb;
5360
5361 BT_DBG("%s", hdev->name);
5362
Linus Torvalds1da177e2005-04-16 15:20:36 -07005363 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005364 /* Send copy to monitor */
5365 hci_send_to_monitor(hdev, skb);
5366
Linus Torvalds1da177e2005-04-16 15:20:36 -07005367 if (atomic_read(&hdev->promisc)) {
5368 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005369 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370 }
5371
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005372 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 kfree_skb(skb);
5374 continue;
5375 }
5376
5377 if (test_bit(HCI_INIT, &hdev->flags)) {
5378 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005379 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005380 case HCI_ACLDATA_PKT:
5381 case HCI_SCODATA_PKT:
5382 kfree_skb(skb);
5383 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005384 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385 }
5386
5387 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005388 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005389 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005390 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005391 hci_event_packet(hdev, skb);
5392 break;
5393
5394 case HCI_ACLDATA_PKT:
5395 BT_DBG("%s ACL data packet", hdev->name);
5396 hci_acldata_packet(hdev, skb);
5397 break;
5398
5399 case HCI_SCODATA_PKT:
5400 BT_DBG("%s SCO data packet", hdev->name);
5401 hci_scodata_packet(hdev, skb);
5402 break;
5403
5404 default:
5405 kfree_skb(skb);
5406 break;
5407 }
5408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005409}
5410
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005411static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005412{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005413 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005414 struct sk_buff *skb;
5415
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005416 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5417 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005418
Linus Torvalds1da177e2005-04-16 15:20:36 -07005419 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005420 if (atomic_read(&hdev->cmd_cnt)) {
5421 skb = skb_dequeue(&hdev->cmd_q);
5422 if (!skb)
5423 return;
5424
Wei Yongjun7585b972009-02-25 18:29:52 +08005425 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005426
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005427 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005428 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005429 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005430 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005431 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005432 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005433 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005434 schedule_delayed_work(&hdev->cmd_timer,
5435 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 } else {
5437 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005438 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005439 }
5440 }
5441}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005442
/* Append an LE Set Scan Enable command that disables scanning to the
 * given request. All other command parameters are zeroed.
 */
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	/* memset (rather than field init) so padding sent on the wire
	 * is zeroed as well.
	 */
	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
Andre Guedesa4790db2014-02-26 20:21:47 -03005451
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005452static void add_to_white_list(struct hci_request *req,
5453 struct hci_conn_params *params)
5454{
5455 struct hci_cp_le_add_to_white_list cp;
5456
5457 cp.bdaddr_type = params->addr_type;
5458 bacpy(&cp.bdaddr, &params->addr);
5459
5460 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5461}
5462
5463static u8 update_white_list(struct hci_request *req)
5464{
5465 struct hci_dev *hdev = req->hdev;
5466 struct hci_conn_params *params;
5467 struct bdaddr_list *b;
5468 uint8_t white_list_entries = 0;
5469
5470 /* Go through the current white list programmed into the
5471 * controller one by one and check if that address is still
5472 * in the list of pending connections or list of devices to
5473 * report. If not present in either list, then queue the
5474 * command to remove it from the controller.
5475 */
5476 list_for_each_entry(b, &hdev->le_white_list, list) {
5477 struct hci_cp_le_del_from_white_list cp;
5478
5479 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5480 &b->bdaddr, b->bdaddr_type) ||
5481 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5482 &b->bdaddr, b->bdaddr_type)) {
5483 white_list_entries++;
5484 continue;
5485 }
5486
5487 cp.bdaddr_type = b->bdaddr_type;
5488 bacpy(&cp.bdaddr, &b->bdaddr);
5489
5490 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5491 sizeof(cp), &cp);
5492 }
5493
5494 /* Since all no longer valid white list entries have been
5495 * removed, walk through the list of pending connections
5496 * and ensure that any new device gets programmed into
5497 * the controller.
5498 *
5499 * If the list of the devices is larger than the list of
5500 * available white list entries in the controller, then
5501 * just abort and return filer policy value to not use the
5502 * white list.
5503 */
5504 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5505 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5506 &params->addr, params->addr_type))
5507 continue;
5508
5509 if (white_list_entries >= hdev->le_white_list_size) {
5510 /* Select filter policy to accept all advertising */
5511 return 0x00;
5512 }
5513
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005514 if (hci_find_irk_by_addr(hdev, &params->addr,
5515 params->addr_type)) {
5516 /* White list can not be used with RPAs */
5517 return 0x00;
5518 }
5519
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005520 white_list_entries++;
5521 add_to_white_list(req, params);
5522 }
5523
5524 /* After adding all new pending connections, walk through
5525 * the list of pending reports and also add these to the
5526 * white list if there is still space.
5527 */
5528 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5529 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5530 &params->addr, params->addr_type))
5531 continue;
5532
5533 if (white_list_entries >= hdev->le_white_list_size) {
5534 /* Select filter policy to accept all advertising */
5535 return 0x00;
5536 }
5537
Marcel Holtmann66d8e832014-07-24 15:20:58 +02005538 if (hci_find_irk_by_addr(hdev, &params->addr,
5539 params->addr_type)) {
5540 /* White list can not be used with RPAs */
5541 return 0x00;
5542 }
5543
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005544 white_list_entries++;
5545 add_to_white_list(req, params);
5546 }
5547
5548 /* Select filter policy to use white list */
5549 return 0x01;
5550}
5551
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005552void hci_req_add_le_passive_scan(struct hci_request *req)
5553{
5554 struct hci_cp_le_set_scan_param param_cp;
5555 struct hci_cp_le_set_scan_enable enable_cp;
5556 struct hci_dev *hdev = req->hdev;
5557 u8 own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005558 u8 filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005559
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005560 /* Set require_privacy to false since no SCAN_REQ are send
5561 * during passive scanning. Not using an unresolvable address
5562 * here is important so that peer devices using direct
5563 * advertising with our address will be correctly reported
5564 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005565 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005566 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005567 return;
5568
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005569 /* Adding or removing entries from the white list must
5570 * happen before enabling scanning. The controller does
5571 * not allow white list modification while scanning.
5572 */
5573 filter_policy = update_white_list(req);
5574
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005575 memset(&param_cp, 0, sizeof(param_cp));
5576 param_cp.type = LE_SCAN_PASSIVE;
5577 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5578 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5579 param_cp.own_address_type = own_addr_type;
Marcel Holtmann8540f6c2014-07-24 15:20:57 +02005580 param_cp.filter_policy = filter_policy;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005581 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5582 &param_cp);
5583
5584 memset(&enable_cp, 0, sizeof(enable_cp));
5585 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005586 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005587 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5588 &enable_cp);
5589}
5590
Andre Guedesa4790db2014-02-26 20:21:47 -03005591static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5592{
5593 if (status)
5594 BT_DBG("HCI request failed to update background scanning: "
5595 "status 0x%2.2x", status);
5596}
5597
/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connection we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	/* Bail out while the device is down, still initializing or being
	 * configured, auto-powering off, or going away entirely.
	 */
	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    test_bit(HCI_SETUP, &hdev->dev_flags) ||
	    test_bit(HCI_CONFIG, &hdev->dev_flags) ||
	    test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
	    test_bit(HCI_UNREGISTER, &hdev->dev_flags))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there is no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	/* Submit the queued commands; completion is logged only */
	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
Johan Hedberg432df052014-08-01 11:13:31 +03005670
Johan Hedberg22f433d2014-08-01 11:13:32 +03005671static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5672{
5673 struct bdaddr_list *b;
5674
5675 list_for_each_entry(b, &hdev->whitelist, list) {
5676 struct hci_conn *conn;
5677
5678 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5679 if (!conn)
5680 return true;
5681
5682 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5683 return true;
5684 }
5685
5686 return false;
5687}
5688
Johan Hedberg432df052014-08-01 11:13:31 +03005689void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5690{
5691 u8 scan;
5692
5693 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5694 return;
5695
5696 if (!hdev_is_powered(hdev))
5697 return;
5698
5699 if (mgmt_powering_down(hdev))
5700 return;
5701
5702 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
Johan Hedberg22f433d2014-08-01 11:13:32 +03005703 disconnected_whitelist_entries(hdev))
Johan Hedberg432df052014-08-01 11:13:31 +03005704 scan = SCAN_PAGE;
5705 else
5706 scan = SCAN_DISABLED;
5707
5708 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5709 return;
5710
5711 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5712 scan |= SCAN_INQUIRY;
5713
5714 if (req)
5715 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5716 else
5717 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5718}