/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
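
/* Usage sketch (editor's note, not upstream documentation): this
 * attribute is registered in debugfs and typically appears as
 * /sys/kernel/debug/bluetooth/hciX/dut_mode, assuming debugfs is
 * mounted at /sys/kernel/debug. Reading returns 'Y' or 'N'; writing
 * a boolean toggles Device Under Test mode while the device is up:
 *
 *	echo 1 > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *	cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 */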

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
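
/* Worked example for the byte reversal in uuids_show() above, using an
 * illustrative stored value: if uuid->uuid[] holds
 *	{ 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
 *	  0x00, 0x10, 0x00, 0x00, 0x0b, 0x11, 0x00, 0x00 }
 * then val[] receives the bytes in reverse and %pUb prints
 * 0000110b-0000-1000-8000-00805f9b34fb, i.e. the 16-bit UUID 0x110b
 * expanded with the Bluetooth base UUID.
 */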
227
Marcel Holtmannbaf27f62013-10-16 03:28:55 -0700228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
Marcel Holtmann02d08d12013-10-18 12:04:52 -0700264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
Marcel Holtmannbabdbb32013-10-18 12:04:51 -0700292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
Marcel Holtmann041000b2013-10-17 12:02:31 -0700316static int voice_setting_get(void *data, u64 *val)
317{
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325}
326
327DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
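
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a numeric
 * debugfs attribute from a get callback, an optional set callback (NULL
 * here makes voice_setting read-only) and a printf format for the value.
 * The same pattern is used for the writable numeric attributes below.
 */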

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
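
/* Range note for idle_timeout_set() above: 0 disables the idle timer,
 * otherwise the accepted window 500..3600000 spans half a second up to
 * one hour (3600000 = 60 * 60 * 1000). That the value is interpreted
 * in milliseconds is an inference from how hdev->idle_timeout is
 * consumed elsewhere in the stack, not something stated in this file.
 */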

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
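
/* The sniff setters above enforce non-zero even values and keep
 * min <= max. Going by the HCI Sniff Mode command definition (an
 * assumption stated for orientation, not taken from this file), the
 * values are baseband slots of 0.625 ms, so e.g. 0x0320 = 800 slots
 * = 500 ms.
 */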

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
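
/* The LE connection interval bounds above use the HCI encoding in
 * units of 1.25 ms: the accepted range 0x0006..0x0c80 therefore spans
 * 6 * 1.25 ms = 7.5 ms up to 3200 * 1.25 ms = 4 s, matching the limits
 * of the LE Create Connection command parameters.
 */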

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	buf = memdup_user(data, count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open		= le_auto_conn_open,
	.read		= seq_read,
	.write		= le_auto_conn_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
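
/* Write-format sketch for the le_auto_conn entry, derived from the
 * sscanf() patterns above (address in the usual printed byte order,
 * then the address type and, for "add", the auto-connect policy, both
 * decimal). The address and values here are illustrative only, and the
 * debugfs path is elided:
 *
 *	echo "add 00:11:22:33:44:55 1 2" > .../le_auto_conn
 *	echo "del 00:11:22:33:44:55 1"   > .../le_auto_conn
 *	echo "clr"                       > .../le_auto_conn
 */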

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
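
/* Caller-side sketch for __hci_cmd_sync(), mirroring its use in
 * dut_mode_write() earlier in this file: send one command, wait for
 * the matching Command Complete event, and translate the status byte.
 * Illustrative only:
 *
 *	struct sk_buff *skb;
 *	int err;
 *
 *	hci_req_lock(hdev);
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *	hci_req_unlock(hdev);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 */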

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
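
/* Reading aid (editor's note, not upstream documentation): the init
 * request stages build on each other. hci_init1_req() resets the
 * controller and reads its basic identity (features, version, BD
 * address); hci_init2_req() and hci_init3_req() below configure event
 * masks, SSP, link policy and LE support based on what stage 1
 * discovered; hci_init4_req() handles late optional features such as
 * event mask page 2 and Synchronization Train parameters.
 */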

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
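
/* The return values above map to the HCI Write Inquiry Mode parameter:
 * 0x00 standard inquiry results, 0x01 results with RSSI, 0x02 extended
 * inquiry results. The manufacturer/revision checks appear to
 * whitelist specific controllers known to handle RSSI results even
 * though they do not advertise the corresponding LMP feature bit.
 */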
1377
Johan Hedberg42c6b122013-03-05 20:37:49 +02001378static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001379{
1380 u8 mode;
1381
Johan Hedberg42c6b122013-03-05 20:37:49 +02001382 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001383
Johan Hedberg42c6b122013-03-05 20:37:49 +02001384 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001385}
1386
Johan Hedberg42c6b122013-03-05 20:37:49 +02001387static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001388{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001389 struct hci_dev *hdev = req->hdev;
1390
Johan Hedberg2177bab2013-03-05 20:37:43 +02001391 /* The second byte is 0xff instead of 0x9f (two reserved bits
1392 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1393 * command otherwise.
1394 */
1395 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1396
1397 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1398 * any event mask for pre 1.2 devices.
1399 */
1400 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1401 return;
1402
1403 if (lmp_bredr_capable(hdev)) {
1404 events[4] |= 0x01; /* Flow Specification Complete */
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1407 events[5] |= 0x08; /* Synchronous Connection Complete */
1408 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -07001409 } else {
1410 /* Use a different default for LE-only devices */
1411 memset(events, 0, sizeof(events));
1412 events[0] |= 0x10; /* Disconnection Complete */
1413 events[0] |= 0x80; /* Encryption Change */
1414 events[1] |= 0x08; /* Read Remote Version Information Complete */
1415 events[1] |= 0x20; /* Command Complete */
1416 events[1] |= 0x40; /* Command Status */
1417 events[1] |= 0x80; /* Hardware Error */
1418 events[2] |= 0x04; /* Number of Completed Packets */
1419 events[3] |= 0x02; /* Data Buffer Overflow */
1420 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +02001421 }
1422
1423 if (lmp_inq_rssi_capable(hdev))
1424 events[4] |= 0x02; /* Inquiry Result with RSSI */
1425
1426 if (lmp_sniffsubr_capable(hdev))
1427 events[5] |= 0x20; /* Sniff Subrating */
1428
1429 if (lmp_pause_enc_capable(hdev))
1430 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1431
1432 if (lmp_ext_inq_capable(hdev))
1433 events[5] |= 0x40; /* Extended Inquiry Result */
1434
1435 if (lmp_no_flush_capable(hdev))
1436 events[7] |= 0x01; /* Enhanced Flush Complete */
1437
1438 if (lmp_lsto_capable(hdev))
1439 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1440
1441 if (lmp_ssp_capable(hdev)) {
1442 events[6] |= 0x01; /* IO Capability Request */
1443 events[6] |= 0x02; /* IO Capability Response */
1444 events[6] |= 0x04; /* User Confirmation Request */
1445 events[6] |= 0x08; /* User Passkey Request */
1446 events[6] |= 0x10; /* Remote OOB Data Request */
1447 events[6] |= 0x20; /* Simple Pairing Complete */
1448 events[7] |= 0x04; /* User Passkey Notification */
1449 events[7] |= 0x08; /* Keypress Notification */
1450 events[7] |= 0x10; /* Remote Host Supported
1451 * Features Notification
1452 */
1453 }
1454
1455 if (lmp_le_capable(hdev))
1456 events[7] |= 0x20; /* LE Meta-Event */
1457
Johan Hedberg42c6b122013-03-05 20:37:49 +02001458 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001459
1460 if (lmp_le_capable(hdev)) {
1461 memset(events, 0, sizeof(events));
1462 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001463 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1464 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001465 }
1466}
1467
Johan Hedberg42c6b122013-03-05 20:37:49 +02001468static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001469{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001470 struct hci_dev *hdev = req->hdev;
1471
Johan Hedberg2177bab2013-03-05 20:37:43 +02001472 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001473 bredr_setup(req);
Johan Hedberg56f87902013-10-02 13:43:13 +03001474 else
1475 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001476
1477 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001478 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001479
Johan Hedberg42c6b122013-03-05 20:37:49 +02001480 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001481
Johan Hedberg3f8e2d72013-07-24 02:32:46 +03001482 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1483 * local supported commands HCI command.
1484 */
1485 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001486 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001487
1488 if (lmp_ssp_capable(hdev)) {
Marcel Holtmann57af75a2013-10-18 12:04:47 -07001489 /* When SSP is available, then the host features page
1490 * should also be available as well. However some
1491 * controllers list the max_page as 0 as long as SSP
1492 * has not been enabled. To achieve proper debugging
1493 * output, force the minimum max_page to 1 at least.
1494 */
1495 hdev->max_page = 0x01;
1496
Johan Hedberg2177bab2013-03-05 20:37:43 +02001497 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1498 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001499 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1500 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001501 } else {
1502 struct hci_cp_write_eir cp;
1503
1504 memset(hdev->eir, 0, sizeof(hdev->eir));
1505 memset(&cp, 0, sizeof(cp));
1506
Johan Hedberg42c6b122013-03-05 20:37:49 +02001507 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001508 }
1509 }
1510
1511 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001512 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001513
1514 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001515 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001516
1517 if (lmp_ext_feat_capable(hdev)) {
1518 struct hci_cp_read_local_ext_features cp;
1519
1520 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001521 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1522 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001523 }
1524
1525 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1526 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001527 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1528 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001529 }
1530}
1531
Johan Hedberg42c6b122013-03-05 20:37:49 +02001532static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001533{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001534 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001535 struct hci_cp_write_def_link_policy cp;
1536 u16 link_policy = 0;
1537
1538 if (lmp_rswitch_capable(hdev))
1539 link_policy |= HCI_LP_RSWITCH;
1540 if (lmp_hold_capable(hdev))
1541 link_policy |= HCI_LP_HOLD;
1542 if (lmp_sniff_capable(hdev))
1543 link_policy |= HCI_LP_SNIFF;
1544 if (lmp_park_capable(hdev))
1545 link_policy |= HCI_LP_PARK;
1546
1547 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +02001548 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001549}
1550
Johan Hedberg42c6b122013-03-05 20:37:49 +02001551static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001552{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001553 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001554 struct hci_cp_write_le_host_supported cp;
1555
Johan Hedbergc73eee92013-04-19 18:35:21 +03001556 /* LE-only devices do not support explicit enablement */
1557 if (!lmp_bredr_capable(hdev))
1558 return;
1559
Johan Hedberg2177bab2013-03-05 20:37:43 +02001560 memset(&cp, 0, sizeof(cp));
1561
1562 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1563 cp.le = 0x01;
1564 cp.simul = lmp_le_br_capable(hdev);
1565 }
1566
1567 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001568 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1569 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001570}
1571
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

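/* Third initialization stage: delete stored link keys when supported,
 * configure the default link policy, enable LE host support and read
 * the remaining local extended feature pages.
 */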
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support deleting stored
	 * link keys, but they don't. The quirk lets a driver just
	 * disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

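/* Fourth initialization stage: page 2 event mask, synchronization
 * train parameters and Secure Connections support, each gated on the
 * corresponding controller capability.
 */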
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

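/* Run the staged initialization sequence. Stage 1 applies to all
 * controller types; AMP controllers stop there, while BR/EDR/LE
 * controllers continue with stages 2-4. The debugfs entries are only
 * created during the initial setup phase.
 */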
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
				    &le_auto_conn_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);
	}

	return 0;
}

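/* Request builders for the HCISETSCAN, HCISETAUTH, HCISETENCRYPT and
 * HCISETLINKPOL ioctls handled by hci_dev_cmd() below. Each queues a
 * single HCI command carrying the user-supplied option.
 */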
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

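/* Discovery is considered active while inquiry or scanning is in
 * progress and while remote names are being resolved.
 */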
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

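/* Add a fresh inquiry result to the cache, or refresh an existing
 * entry. Returns false if the entry could not be allocated or its
 * remote name is still completely unknown, true otherwise.
 */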
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

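/* Handle the HCIINQUIRY ioctl: start an inquiry if the cache is stale
 * or a flush was requested, wait for it to finish and copy the cached
 * results back to user space.
 */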
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

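/* Bring the controller up: call the driver open callback, run the
 * driver setup stage on first power on and then the staged HCI init
 * sequence. Used by both hci_dev_open() and the power_on work.
 */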
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked for raw-only usage can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

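/* Bring the controller down: flush pending work and queues, drop all
 * connections, optionally issue an HCI Reset and call the driver
 * close callback. Counterpart of hci_dev_do_open().
 */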
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

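/* Deferred power-on work. Error conditions that are ignored during
 * the setup stage (RFKILL, missing public/static address) are
 * re-checked here and the controller is powered back down if they
 * still apply.
 */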
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

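/* Decide whether a new link key should be stored persistently, based
 * on the key type and the bonding requirements of both sides of the
 * connection.
 */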
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302875static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002876 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002877{
2878 /* Legacy key */
2879 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302880 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002881
2882 /* Debug keys are insecure so don't store them persistently */
2883 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302884 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002885
2886 /* Changed combination key and there's no previous one */
2887 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302888 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002889
2890 /* Security mode 3 case */
2891 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302892 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002893
2894 /* Neither local nor remote side had no-bonding as requirement */
2895 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302896 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002897
2898 /* Local side had dedicated bonding as requirement */
2899 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302900 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002901
2902 /* Remote side had dedicated bonding as requirement */
2903 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302904 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002905
2906 /* If none of the above criteria match, then don't store the key
2907 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302908 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002909}
2910
Johan Hedberg98a0b842014-01-30 19:40:00 -08002911static bool ltk_type_master(u8 type)
2912{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002913 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002914}
2915
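/* Find an LTK by the ediv/rand pair received in an LE Long Term Key
 * Request event. The master flag selects between keys distributed
 * for the master role (SMP_LTK) and those stored for the slave role,
 * since both may exist for the same peer.
 */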
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002916struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002917 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002918{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002919 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002920
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002921 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002922 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002923 continue;
2924
Johan Hedberg98a0b842014-01-30 19:40:00 -08002925 if (ltk_type_master(k->type) != master)
2926 continue;
2927
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002928 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002929 }
2930
2931 return NULL;
2932}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002933
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002934struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002935 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002936{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002937 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002938
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002939 list_for_each_entry(k, &hdev->long_term_keys, list)
2940 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002941 bacmp(bdaddr, &k->bdaddr) == 0 &&
2942 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002943 return k;
2944
2945 return NULL;
2946}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002947
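/* Resolve a Resolvable Private Address to a stored IRK. The cached
 * irk->rpa values are checked first; only if no cached entry matches
 * is the more expensive AES based resolution run against every key,
 * with a successful match cached for the next lookup.
 */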
Johan Hedberg970c4e42014-02-18 10:19:33 +02002948struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2949{
2950 struct smp_irk *irk;
2951
2952 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2953 if (!bacmp(&irk->rpa, rpa))
2954 return irk;
2955 }
2956
2957 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2958 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2959 bacpy(&irk->rpa, rpa);
2960 return irk;
2961 }
2962 }
2963
2964 return NULL;
2965}
2966
2967struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2968 u8 addr_type)
2969{
2970 struct smp_irk *irk;
2971
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002972 /* Identity Address must be public or static random */
2973 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2974 return NULL;
2975
Johan Hedberg970c4e42014-02-18 10:19:33 +02002976 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2977 if (addr_type == irk->addr_type &&
2978 bacmp(bdaddr, &irk->bdaddr) == 0)
2979 return irk;
2980 }
2981
2982 return NULL;
2983}
2984
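/* Store (or update) a BR/EDR link key. An existing entry for the
 * address is reused when present. If a persistent pointer is given,
 * it is set to whether the key should also be stored permanently by
 * user space, as decided by hci_persistent_key().
 */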
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002985struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002986 bdaddr_t *bdaddr, u8 *val, u8 type,
2987 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002988{
2989 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302990 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002991
2992 old_key = hci_find_link_key(hdev, bdaddr);
2993 if (old_key) {
2994 old_key_type = old_key->type;
2995 key = old_key;
2996 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002997 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002998 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002999 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003000 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003001 list_add(&key->list, &hdev->link_keys);
3002 }
3003
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003004 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003005
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003006 /* Some buggy controller combinations generate a changed
3007 * combination key for legacy pairing even when there's no
3008 * previous key */
3009 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003010 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003011 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07003012 if (conn)
3013 conn->key_type = type;
3014 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07003015
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003016 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03003017 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003018 key->pin_len = pin_len;
3019
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02003020 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003021 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07003022 else
3023 key->type = type;
3024
Johan Hedberg7652ff62014-06-24 13:15:49 +03003025 if (persistent)
3026 *persistent = hci_persistent_key(hdev, conn, type,
3027 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003028
Johan Hedberg567fa2a2014-06-24 13:15:48 +03003029 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003030}
3031
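/* Store (or update) an LE Long Term Key. Lookup is done by address,
 * address type and role so that master and slave keys for the same
 * peer can coexist.
 */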
Johan Hedbergca9142b2014-02-19 14:57:44 +02003032struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02003033 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003034 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003035{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003036 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08003037 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003038
Johan Hedberg98a0b842014-01-30 19:40:00 -08003039 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003040 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003041 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003042 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003043 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003044 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003045 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003046 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003047 }
3048
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003049 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003050 key->bdaddr_type = addr_type;
3051 memcpy(key->val, tk, sizeof(key->val));
3052 key->authenticated = authenticated;
3053 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003054 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003055 key->enc_size = enc_size;
3056 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003057
Johan Hedbergca9142b2014-02-19 14:57:44 +02003058 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003059}
3060
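/* Store (or update) an Identity Resolving Key for the given identity
 * address, together with the last Resolvable Private Address it was
 * seen with.
 */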
Johan Hedbergca9142b2014-02-19 14:57:44 +02003061struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3062 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003063{
3064 struct smp_irk *irk;
3065
3066 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3067 if (!irk) {
3068 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3069 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003070 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003071
3072 bacpy(&irk->bdaddr, bdaddr);
3073 irk->addr_type = addr_type;
3074
3075 list_add(&irk->list, &hdev->identity_resolving_keys);
3076 }
3077
3078 memcpy(irk->val, val, 16);
3079 bacpy(&irk->rpa, rpa);
3080
Johan Hedbergca9142b2014-02-19 14:57:44 +02003081 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003082}
3083
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003084int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3085{
3086 struct link_key *key;
3087
3088 key = hci_find_link_key(hdev, bdaddr);
3089 if (!key)
3090 return -ENOENT;
3091
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003092 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003093
3094 list_del(&key->list);
3095 kfree(key);
3096
3097 return 0;
3098}
3099
Johan Hedberge0b2b272014-02-18 17:14:31 +02003100int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003101{
3102 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003103 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003104
3105 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003106 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003107 continue;
3108
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003109 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003110
3111 list_del(&k->list);
3112 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003113 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003114 }
3115
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003116 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003117}
3118
Johan Hedberga7ec7332014-02-18 17:14:35 +02003119void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3120{
3121 struct smp_irk *k, *tmp;
3122
Johan Hedberg668b7b12014-02-21 16:03:31 +02003123 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003124 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3125 continue;
3126
3127 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3128
3129 list_del(&k->list);
3130 kfree(k);
3131 }
3132}
3133
Ville Tervo6bd32322011-02-16 16:32:41 +02003134/* HCI command timeout handler */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003135static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003136{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003137 struct hci_dev *hdev = container_of(work, struct hci_dev,
3138 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003139
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003140 if (hdev->sent_cmd) {
3141 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3142 u16 opcode = __le16_to_cpu(sent->opcode);
3143
3144 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3145 } else {
3146 BT_ERR("%s command tx timeout", hdev->name);
3147 }
3148
Ville Tervo6bd32322011-02-16 16:32:41 +02003149 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003150 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003151}
3152
Szymon Janc2763eda2011-03-22 13:12:22 +01003153struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003154 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003155{
3156 struct oob_data *data;
3157
3158 list_for_each_entry(data, &hdev->remote_oob_data, list)
3159 if (bacmp(bdaddr, &data->bdaddr) == 0)
3160 return data;
3161
3162 return NULL;
3163}
3164
3165int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3166{
3167 struct oob_data *data;
3168
3169 data = hci_find_remote_oob_data(hdev, bdaddr);
3170 if (!data)
3171 return -ENOENT;
3172
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003173 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003174
3175 list_del(&data->list);
3176 kfree(data);
3177
3178 return 0;
3179}
3180
Johan Hedberg35f74982014-02-18 17:14:32 +02003181void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003182{
3183 struct oob_data *data, *n;
3184
3185 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3186 list_del(&data->list);
3187 kfree(data);
3188 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003189}
3190
Marcel Holtmann07988722014-01-10 02:07:29 -08003191int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3192 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003193{
3194 struct oob_data *data;
3195
3196 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003197 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003198 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003199 if (!data)
3200 return -ENOMEM;
3201
3202 bacpy(&data->bdaddr, bdaddr);
3203 list_add(&data->list, &hdev->remote_oob_data);
3204 }
3205
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003206 memcpy(data->hash192, hash, sizeof(data->hash192));
3207 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003208
Marcel Holtmann07988722014-01-10 02:07:29 -08003209 memset(data->hash256, 0, sizeof(data->hash256));
3210 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3211
3212 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3213
3214 return 0;
3215}
3216
3217int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3218 u8 *hash192, u8 *randomizer192,
3219 u8 *hash256, u8 *randomizer256)
3220{
3221 struct oob_data *data;
3222
3223 data = hci_find_remote_oob_data(hdev, bdaddr);
3224 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003225 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003226 if (!data)
3227 return -ENOMEM;
3228
3229 bacpy(&data->bdaddr, bdaddr);
3230 list_add(&data->list, &hdev->remote_oob_data);
3231 }
3232
3233 memcpy(data->hash192, hash192, sizeof(data->hash192));
3234 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3235
3236 memcpy(data->hash256, hash256, sizeof(data->hash256));
3237 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3238
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003239 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003240
3241 return 0;
3242}
3243
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003244struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3245 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003246{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003247 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003248
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003249 list_for_each_entry(b, &hdev->blacklist, list) {
3250 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003251 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003252 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003253
3254 return NULL;
3255}
3256
Marcel Holtmannc9507492014-02-27 19:35:54 -08003257static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003258{
3259 struct list_head *p, *n;
3260
3261 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003262 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003263
3264 list_del(p);
3265 kfree(b);
3266 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003267}
3268
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003269int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003270{
3271 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003272
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003273 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003274 return -EBADF;
3275
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003276 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003277 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003278
3279 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003280 if (!entry)
3281 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003282
3283 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003284 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003285
3286 list_add(&entry->list, &hdev->blacklist);
3287
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003288 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003289}
3290
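/* Remove a device from the blacklist. As a special case, passing
 * BDADDR_ANY clears the whole list.
 */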
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003291int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003292{
3293 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003294
Johan Hedberg35f74982014-02-18 17:14:32 +02003295 if (!bacmp(bdaddr, BDADDR_ANY)) {
3296 hci_blacklist_clear(hdev);
3297 return 0;
3298 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003299
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003300 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003301 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003302 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003303
3304 list_del(&entry->list);
3305 kfree(entry);
3306
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003307 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003308}
3309
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003310struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3311 bdaddr_t *bdaddr, u8 type)
3312{
3313 struct bdaddr_list *b;
3314
3315 list_for_each_entry(b, &hdev->le_white_list, list) {
3316 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3317 return b;
3318 }
3319
3320 return NULL;
3321}
3322
3323void hci_white_list_clear(struct hci_dev *hdev)
3324{
3325 struct list_head *p, *n;
3326
3327 list_for_each_safe(p, n, &hdev->le_white_list) {
3328 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3329
3330 list_del(p);
3331 kfree(b);
3332 }
3333}
3334
3335int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3336{
3337 struct bdaddr_list *entry;
3338
3339 if (!bacmp(bdaddr, BDADDR_ANY))
3340 return -EBADF;
3341
3342 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3343 if (!entry)
3344 return -ENOMEM;
3345
3346 bacpy(&entry->bdaddr, bdaddr);
3347 entry->bdaddr_type = type;
3348
3349 list_add(&entry->list, &hdev->le_white_list);
3350
3351 return 0;
3352}
3353
3354int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3355{
3356 struct bdaddr_list *entry;
3357
3358 if (!bacmp(bdaddr, BDADDR_ANY))
3359 return -EBADF;
3360
3361 entry = hci_white_list_lookup(hdev, bdaddr, type);
3362 if (!entry)
3363 return -ENOENT;
3364
3365 list_del(&entry->list);
3366 kfree(entry);
3367
3368 return 0;
3369}
3370
Andre Guedes15819a72014-02-03 13:56:18 -03003371/* This function requires the caller holds hdev->lock */
3372struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3373 bdaddr_t *addr, u8 addr_type)
3374{
3375 struct hci_conn_params *params;
3376
3377 list_for_each_entry(params, &hdev->le_conn_params, list) {
3378 if (bacmp(&params->addr, addr) == 0 &&
3379 params->addr_type == addr_type) {
3380 return params;
3381 }
3382 }
3383
3384 return NULL;
3385}
3386
Andre Guedescef952c2014-02-26 20:21:49 -03003387static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3388{
3389 struct hci_conn *conn;
3390
3391 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3392 if (!conn)
3393 return false;
3394
3395 if (conn->dst_type != type)
3396 return false;
3397
3398 if (conn->state != BT_CONNECTED)
3399 return false;
3400
3401 return true;
3402}
3403
Andre Guedesa9b0a042014-02-26 20:21:52 -03003404static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3405{
3406 if (addr_type == ADDR_LE_DEV_PUBLIC)
3407 return true;
3408
3409 /* Check for Random Static address type */
3410 if ((addr->b[5] & 0xc0) == 0xc0)
3411 return true;
3412
3413 return false;
3414}
3415
Andre Guedes15819a72014-02-03 13:56:18 -03003416/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003417struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3418 bdaddr_t *addr, u8 addr_type)
3419{
3420 struct bdaddr_list *entry;
3421
3422 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3423 if (bacmp(&entry->bdaddr, addr) == 0 &&
3424 entry->bdaddr_type == addr_type)
3425 return entry;
3426 }
3427
3428 return NULL;
3429}
3430
3431/* This function requires the caller holds hdev->lock */
3432void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3433{
3434 struct bdaddr_list *entry;
3435
3436 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3437 if (entry)
3438 goto done;
3439
3440 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3441 if (!entry) {
3442 BT_ERR("Out of memory");
3443 return;
3444 }
3445
3446 bacpy(&entry->bdaddr, addr);
3447 entry->bdaddr_type = addr_type;
3448
3449 list_add(&entry->list, &hdev->pend_le_conns);
3450
3451 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3452
3453done:
3454 hci_update_background_scan(hdev);
3455}
3456
3457/* This function requires the caller holds hdev->lock */
3458void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3459{
3460 struct bdaddr_list *entry;
3461
3462 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3463 if (!entry)
3464 goto done;
3465
3466 list_del(&entry->list);
3467 kfree(entry);
3468
3469 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3470
3471done:
3472 hci_update_background_scan(hdev);
3473}
3474
3475/* This function requires the caller holds hdev->lock */
3476void hci_pend_le_conns_clear(struct hci_dev *hdev)
3477{
3478 struct bdaddr_list *entry, *tmp;
3479
3480 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3481 list_del(&entry->list);
3482 kfree(entry);
3483 }
3484
3485 BT_DBG("All LE pending connections cleared");
3486}
3487
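/* Set connection parameters and the autoconnect policy for an LE
 * peer. HCI_AUTO_CONN_ALWAYS puts the device on the pending
 * connection list (unless it is already connected), while
 * HCI_AUTO_CONN_DISABLED and HCI_AUTO_CONN_LINK_LOSS take it off
 * that list.
 */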
3488/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003489int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3490 u8 auto_connect, u16 conn_min_interval,
3491 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003492{
3493 struct hci_conn_params *params;
3494
Andre Guedesa9b0a042014-02-26 20:21:52 -03003495 if (!is_identity_address(addr, addr_type))
3496 return -EINVAL;
3497
Andre Guedes15819a72014-02-03 13:56:18 -03003498 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003499 if (params)
3500 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003501
3502 params = kzalloc(sizeof(*params), GFP_KERNEL);
3503 if (!params) {
3504 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003505 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003506 }
3507
3508 bacpy(&params->addr, addr);
3509 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003510
3511 list_add(&params->list, &hdev->le_conn_params);
3512
3513update:
Andre Guedes15819a72014-02-03 13:56:18 -03003514 params->conn_min_interval = conn_min_interval;
3515 params->conn_max_interval = conn_max_interval;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003516 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003517
Andre Guedescef952c2014-02-26 20:21:49 -03003518 switch (auto_connect) {
3519 case HCI_AUTO_CONN_DISABLED:
3520 case HCI_AUTO_CONN_LINK_LOSS:
3521 hci_pend_le_conn_del(hdev, addr, addr_type);
3522 break;
3523 case HCI_AUTO_CONN_ALWAYS:
3524 if (!is_connected(hdev, addr, addr_type))
3525 hci_pend_le_conn_add(hdev, addr, addr_type);
3526 break;
3527 }
Andre Guedes15819a72014-02-03 13:56:18 -03003528
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003529 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3530 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3531 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003532
3533 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003534}
3535
3536/* This function requires the caller holds hdev->lock */
3537void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3538{
3539 struct hci_conn_params *params;
3540
3541 params = hci_conn_params_lookup(hdev, addr, addr_type);
3542 if (!params)
3543 return;
3544
Andre Guedescef952c2014-02-26 20:21:49 -03003545 hci_pend_le_conn_del(hdev, addr, addr_type);
3546
Andre Guedes15819a72014-02-03 13:56:18 -03003547 list_del(&params->list);
3548 kfree(params);
3549
3550 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3551}
3552
3553/* This function requires the caller holds hdev->lock */
3554void hci_conn_params_clear(struct hci_dev *hdev)
3555{
3556 struct hci_conn_params *params, *tmp;
3557
3558 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3559 list_del(&params->list);
3560 kfree(params);
3561 }
3562
Marcel Holtmann1089b672014-06-29 13:41:50 +02003563 hci_pend_le_conns_clear(hdev);
3564
Andre Guedes15819a72014-02-03 13:56:18 -03003565 BT_DBG("All LE connection parameters were removed");
3566}
3567
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003568static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003569{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003570 if (status) {
3571 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003572
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003573 hci_dev_lock(hdev);
3574 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3575 hci_dev_unlock(hdev);
3576 return;
3577 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003578}
3579
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003580static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003581{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003582 /* General inquiry access code (GIAC) */
3583 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3584 struct hci_request req;
3585 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003586 int err;
3587
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003588 if (status) {
3589 BT_ERR("Failed to disable LE scanning: status %d", status);
3590 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003591 }
3592
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003593 switch (hdev->discovery.type) {
3594 case DISCOV_TYPE_LE:
3595 hci_dev_lock(hdev);
3596 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3597 hci_dev_unlock(hdev);
3598 break;
3599
3600 case DISCOV_TYPE_INTERLEAVED:
3601 hci_req_init(&req, hdev);
3602
3603 memset(&cp, 0, sizeof(cp));
3604 memcpy(&cp.lap, lap, sizeof(cp.lap));
3605 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3606 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3607
3608 hci_dev_lock(hdev);
3609
3610 hci_inquiry_cache_flush(hdev);
3611
3612 err = hci_req_run(&req, inquiry_complete);
3613 if (err) {
3614 BT_ERR("Inquiry request failed: err %d", err);
3615 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3616 }
3617
3618 hci_dev_unlock(hdev);
3619 break;
3620 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003621}
3622
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003623static void le_scan_disable_work(struct work_struct *work)
3624{
3625 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003626 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003627 struct hci_request req;
3628 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003629
3630 BT_DBG("%s", hdev->name);
3631
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003632 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003633
Andre Guedesb1efcc22014-02-26 20:21:40 -03003634 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003635
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003636 err = hci_req_run(&req, le_scan_disable_work_complete);
3637 if (err)
3638 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003639}
3640
Johan Hedberg8d972502014-02-28 12:54:14 +02003641static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3642{
3643 struct hci_dev *hdev = req->hdev;
3644
3645 /* If we're advertising or initiating an LE connection we can't
3646 * go ahead and change the random address at this time. This is
3647 * because the eventual initiator address used for the
3648 * subsequently created connection will be undefined (some
3649 * controllers use the new address and others the one we had
3650 * when the operation started).
3651 *
3652 * In this kind of scenario skip the update and let the random
3653 * address be updated at the next cycle.
3654 */
3655 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3656 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3657 BT_DBG("Deferring random address update");
3658 return;
3659 }
3660
3661 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3662}
3663
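/* Choose the own address type for a request and, when necessary,
 * queue an HCI_OP_LE_SET_RANDOM_ADDR command to set it up. In order
 * of precedence: a resolvable private address when privacy is
 * enabled, an unresolvable private address when privacy is required
 * by the caller, the static address when forced or when no public
 * address is available, and the public address otherwise.
 */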
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003664int hci_update_random_address(struct hci_request *req, bool require_privacy,
3665 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003666{
3667 struct hci_dev *hdev = req->hdev;
3668 int err;
3669
3670 /* If privacy is enabled use a resolvable private address. If
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003671	 * the current RPA has expired or something other than the
3672	 * current RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003673 */
3674 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003675 int to;
3676
3677 *own_addr_type = ADDR_LE_DEV_RANDOM;
3678
3679 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003680 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003681 return 0;
3682
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003683 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003684 if (err < 0) {
3685 BT_ERR("%s failed to generate new RPA", hdev->name);
3686 return err;
3687 }
3688
Johan Hedberg8d972502014-02-28 12:54:14 +02003689 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003690
3691 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3692 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3693
3694 return 0;
3695 }
3696
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003697	/* If privacy is required without a resolvable private address,
3698 * use an unresolvable private address. This is useful for active
3699 * scanning and non-connectable advertising.
3700 */
3701 if (require_privacy) {
3702 bdaddr_t urpa;
3703
3704 get_random_bytes(&urpa, 6);
3705 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3706
3707 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003708 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003709 return 0;
3710 }
3711
Johan Hedbergebd3a742014-02-23 19:42:21 +02003712 /* If forcing static address is in use or there is no public
3713 * address use the static address as random address (but skip
3714 * the HCI command if the current random address is already the
3715	 * static one).
3716 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003717 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003718 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3719 *own_addr_type = ADDR_LE_DEV_RANDOM;
3720 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3721 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3722 &hdev->static_addr);
3723 return 0;
3724 }
3725
3726 /* Neither privacy nor static address is being used so use a
3727 * public address.
3728 */
3729 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3730
3731 return 0;
3732}
3733
Johan Hedberga1f4c312014-02-27 14:05:41 +02003734/* Copy the Identity Address of the controller.
3735 *
3736 * If the controller has a public BD_ADDR, then by default use that one.
3737 * If this is a LE only controller without a public address, default to
3738 * the static random address.
3739 *
3740 * For debugging purposes it is possible to force controllers with a
3741 * public address to use the static random address instead.
3742 */
3743void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3744 u8 *bdaddr_type)
3745{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003746 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003747 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3748 bacpy(bdaddr, &hdev->static_addr);
3749 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3750 } else {
3751 bacpy(bdaddr, &hdev->bdaddr);
3752 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3753 }
3754}
3755
David Herrmann9be0dab2012-04-22 14:39:57 +02003756/* Alloc HCI device */
3757struct hci_dev *hci_alloc_dev(void)
3758{
3759 struct hci_dev *hdev;
3760
3761 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3762 if (!hdev)
3763 return NULL;
3764
David Herrmannb1b813d2012-04-22 14:39:58 +02003765 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3766 hdev->esco_type = (ESCO_HV1);
3767 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003768 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3769 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003770 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3771 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003772
David Herrmannb1b813d2012-04-22 14:39:58 +02003773 hdev->sniff_max_interval = 800;
3774 hdev->sniff_min_interval = 80;
3775
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003776 hdev->le_adv_channel_map = 0x07;
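	/* The scan and connection interval defaults below are in
	 * controller units: 0.625 ms per scan interval/window step and
	 * 1.25 ms per connection interval step, so 0x0028/0x0038 is
	 * 50/70 ms.
	 */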
Marcel Holtmannbef64732013-10-11 08:23:19 -07003777 hdev->le_scan_interval = 0x0060;
3778 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003779 hdev->le_conn_min_interval = 0x0028;
3780 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003781
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003782 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003783 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003784 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3785 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003786
David Herrmannb1b813d2012-04-22 14:39:58 +02003787 mutex_init(&hdev->lock);
3788 mutex_init(&hdev->req_lock);
3789
3790 INIT_LIST_HEAD(&hdev->mgmt_pending);
3791 INIT_LIST_HEAD(&hdev->blacklist);
3792 INIT_LIST_HEAD(&hdev->uuids);
3793 INIT_LIST_HEAD(&hdev->link_keys);
3794 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003795 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003796 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003797 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003798 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003799 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003800 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003801
3802 INIT_WORK(&hdev->rx_work, hci_rx_work);
3803 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3804 INIT_WORK(&hdev->tx_work, hci_tx_work);
3805 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003806
David Herrmannb1b813d2012-04-22 14:39:58 +02003807 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3808 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3809 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3810
David Herrmannb1b813d2012-04-22 14:39:58 +02003811 skb_queue_head_init(&hdev->rx_q);
3812 skb_queue_head_init(&hdev->cmd_q);
3813 skb_queue_head_init(&hdev->raw_q);
3814
3815 init_waitqueue_head(&hdev->req_wait_q);
3816
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003817 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003818
David Herrmannb1b813d2012-04-22 14:39:58 +02003819 hci_init_sysfs(hdev);
3820 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003821
3822 return hdev;
3823}
3824EXPORT_SYMBOL(hci_alloc_dev);
3825
3826/* Free HCI device */
3827void hci_free_dev(struct hci_dev *hdev)
3828{
David Herrmann9be0dab2012-04-22 14:39:57 +02003829 /* will free via device release */
3830 put_device(&hdev->dev);
3831}
3832EXPORT_SYMBOL(hci_free_dev);
3833
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834/* Register HCI device */
3835int hci_register_dev(struct hci_dev *hdev)
3836{
David Herrmannb1b813d2012-04-22 14:39:58 +02003837 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
David Herrmann010666a2012-01-07 15:47:07 +01003839 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 return -EINVAL;
3841
Mat Martineau08add512011-11-02 16:18:36 -07003842 /* Do not allow HCI_AMP devices to register at index 0,
3843 * so the index can be used as the AMP controller ID.
3844 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003845 switch (hdev->dev_type) {
3846 case HCI_BREDR:
3847 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3848 break;
3849 case HCI_AMP:
3850 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3851 break;
3852 default:
3853 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003854 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003855
Sasha Levin3df92b32012-05-27 22:36:56 +02003856 if (id < 0)
3857 return id;
3858
Linus Torvalds1da177e2005-04-16 15:20:36 -07003859 sprintf(hdev->name, "hci%d", id);
3860 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003861
3862 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3863
Kees Cookd8537542013-07-03 15:04:57 -07003864 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3865 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003866 if (!hdev->workqueue) {
3867 error = -ENOMEM;
3868 goto err;
3869 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003870
Kees Cookd8537542013-07-03 15:04:57 -07003871 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3872 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003873 if (!hdev->req_workqueue) {
3874 destroy_workqueue(hdev->workqueue);
3875 error = -ENOMEM;
3876 goto err;
3877 }
3878
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003879 if (!IS_ERR_OR_NULL(bt_debugfs))
3880 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3881
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003882 dev_set_name(&hdev->dev, "%s", hdev->name);
3883
Johan Hedberg99780a72014-02-18 10:40:07 +02003884 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3885 CRYPTO_ALG_ASYNC);
3886 if (IS_ERR(hdev->tfm_aes)) {
3887 BT_ERR("Unable to create crypto context");
3888 error = PTR_ERR(hdev->tfm_aes);
3889 hdev->tfm_aes = NULL;
3890 goto err_wqueue;
3891 }
3892
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003893 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003894 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003895 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003897 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003898 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3899 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003900 if (hdev->rfkill) {
3901 if (rfkill_register(hdev->rfkill) < 0) {
3902 rfkill_destroy(hdev->rfkill);
3903 hdev->rfkill = NULL;
3904 }
3905 }
3906
Johan Hedberg5e130362013-09-13 08:58:17 +03003907 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3908 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3909
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003910 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003911 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003912
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003913 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003914 /* Assume BR/EDR support until proven otherwise (such as
3915 * through reading supported features during init.
3916 */
3917 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3918 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003919
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003920 write_lock(&hci_dev_list_lock);
3921 list_add(&hdev->list, &hci_dev_list);
3922 write_unlock(&hci_dev_list_lock);
3923
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003924 /* Devices that are marked for raw-only usage need to set
3925	 * the HCI_RAW flag to indicate that only the user channel is
3926 * supported.
3927 */
3928 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3929 set_bit(HCI_RAW, &hdev->flags);
3930
Linus Torvalds1da177e2005-04-16 15:20:36 -07003931 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003932 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933
Johan Hedberg19202572013-01-14 22:33:51 +02003934 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003935
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003937
Johan Hedberg99780a72014-02-18 10:40:07 +02003938err_tfm:
3939 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003940err_wqueue:
3941 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003942 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003943err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003944 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003945
David Herrmann33ca9542011-10-08 14:58:49 +02003946 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947}
3948EXPORT_SYMBOL(hci_register_dev);
3949
3950/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003951void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952{
Sasha Levin3df92b32012-05-27 22:36:56 +02003953 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003954
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003955 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
Johan Hovold94324962012-03-15 14:48:41 +01003957 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3958
Sasha Levin3df92b32012-05-27 22:36:56 +02003959 id = hdev->id;
3960
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003961 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003963 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964
3965 hci_dev_do_close(hdev);
3966
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303967 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003968 kfree_skb(hdev->reassembly[i]);
3969
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003970 cancel_work_sync(&hdev->power_on);
3971
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003972 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003973 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3974 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003975 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003976 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003977 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003978 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003979
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003980 /* mgmt_index_removed should take care of emptying the
3981 * pending list */
3982 BUG_ON(!list_empty(&hdev->mgmt_pending));
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 hci_notify(hdev, HCI_DEV_UNREG);
3985
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003986 if (hdev->rfkill) {
3987 rfkill_unregister(hdev->rfkill);
3988 rfkill_destroy(hdev->rfkill);
3989 }
3990
Johan Hedberg99780a72014-02-18 10:40:07 +02003991 if (hdev->tfm_aes)
3992 crypto_free_blkcipher(hdev->tfm_aes);
3993
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003994 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003995
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003996 debugfs_remove_recursive(hdev->debugfs);
3997
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003998 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003999 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01004000
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004001 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004002 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02004003 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02004004 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03004005 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02004006 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01004007 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08004008 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03004009 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03004010 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02004011
David Herrmanndc946bd2012-01-07 15:47:24 +01004012 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02004013
4014 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015}
4016EXPORT_SYMBOL(hci_unregister_dev);
4017
4018/* Suspend HCI device */
4019int hci_suspend_dev(struct hci_dev *hdev)
4020{
4021 hci_notify(hdev, HCI_DEV_SUSPEND);
4022 return 0;
4023}
4024EXPORT_SYMBOL(hci_suspend_dev);
4025
4026/* Resume HCI device */
4027int hci_resume_dev(struct hci_dev *hdev)
4028{
4029 hci_notify(hdev, HCI_DEV_RESUME);
4030 return 0;
4031}
4032EXPORT_SYMBOL(hci_resume_dev);
4033
Marcel Holtmann76bca882009-11-18 00:40:39 +01004034/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004035int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01004036{
Marcel Holtmann76bca882009-11-18 00:40:39 +01004037 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004038 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01004039 kfree_skb(skb);
4040 return -ENXIO;
4041 }
4042
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004043 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004044 bt_cb(skb)->incoming = 1;
4045
4046 /* Time stamp */
4047 __net_timestamp(skb);
4048
Marcel Holtmann76bca882009-11-18 00:40:39 +01004049 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004050 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004051
Marcel Holtmann76bca882009-11-18 00:40:39 +01004052 return 0;
4053}
4054EXPORT_SYMBOL(hci_recv_frame);
4055
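/* Common reassembly helper for the fragment based receive paths:
 * append up to count bytes to the partial packet kept in
 * hdev->reassembly[index], extend the expected length once the
 * packet header has been seen, and hand the skb to hci_recv_frame()
 * when it is complete. Returns the number of bytes left unconsumed,
 * or a negative error.
 */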
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304056static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004057 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304058{
4059 int len = 0;
4060 int hlen = 0;
4061 int remain = count;
4062 struct sk_buff *skb;
4063 struct bt_skb_cb *scb;
4064
4065 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004066 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304067 return -EILSEQ;
4068
4069 skb = hdev->reassembly[index];
4070
4071 if (!skb) {
4072 switch (type) {
4073 case HCI_ACLDATA_PKT:
4074 len = HCI_MAX_FRAME_SIZE;
4075 hlen = HCI_ACL_HDR_SIZE;
4076 break;
4077 case HCI_EVENT_PKT:
4078 len = HCI_MAX_EVENT_SIZE;
4079 hlen = HCI_EVENT_HDR_SIZE;
4080 break;
4081 case HCI_SCODATA_PKT:
4082 len = HCI_MAX_SCO_SIZE;
4083 hlen = HCI_SCO_HDR_SIZE;
4084 break;
4085 }
4086
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004087 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304088 if (!skb)
4089 return -ENOMEM;
4090
4091 scb = (void *) skb->cb;
4092 scb->expect = hlen;
4093 scb->pkt_type = type;
4094
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304095 hdev->reassembly[index] = skb;
4096 }
4097
4098 while (count) {
4099 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004100 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304101
4102 memcpy(skb_put(skb, len), data, len);
4103
4104 count -= len;
4105 data += len;
4106 scb->expect -= len;
4107 remain = count;
4108
4109 switch (type) {
4110 case HCI_EVENT_PKT:
4111 if (skb->len == HCI_EVENT_HDR_SIZE) {
4112 struct hci_event_hdr *h = hci_event_hdr(skb);
4113 scb->expect = h->plen;
4114
4115 if (skb_tailroom(skb) < scb->expect) {
4116 kfree_skb(skb);
4117 hdev->reassembly[index] = NULL;
4118 return -ENOMEM;
4119 }
4120 }
4121 break;
4122
4123 case HCI_ACLDATA_PKT:
4124 if (skb->len == HCI_ACL_HDR_SIZE) {
4125 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4126 scb->expect = __le16_to_cpu(h->dlen);
4127
4128 if (skb_tailroom(skb) < scb->expect) {
4129 kfree_skb(skb);
4130 hdev->reassembly[index] = NULL;
4131 return -ENOMEM;
4132 }
4133 }
4134 break;
4135
4136 case HCI_SCODATA_PKT:
4137 if (skb->len == HCI_SCO_HDR_SIZE) {
4138 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4139 scb->expect = h->dlen;
4140
4141 if (skb_tailroom(skb) < scb->expect) {
4142 kfree_skb(skb);
4143 hdev->reassembly[index] = NULL;
4144 return -ENOMEM;
4145 }
4146 }
4147 break;
4148 }
4149
4150 if (scb->expect == 0) {
4151 /* Complete frame */
4152
4153 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004154 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304155
4156 hdev->reassembly[index] = NULL;
4157 return remain;
4158 }
4159 }
4160
4161 return remain;
4162}
4163
Marcel Holtmannef222012007-07-11 06:42:04 +02004164int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4165{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304166 int rem = 0;
4167
Marcel Holtmannef222012007-07-11 06:42:04 +02004168 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4169 return -EILSEQ;
4170
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004171 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004172 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304173 if (rem < 0)
4174 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004175
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304176 data += (count - rem);
4177 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004178 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004179
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304180 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004181}
4182EXPORT_SYMBOL(hci_recv_fragment);
4183
Suraj Sumangala99811512010-07-14 13:02:19 +05304184#define STREAM_REASSEMBLY 0
4185
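/* Variant of hci_recv_fragment() for drivers that deliver HCI
 * traffic as a raw byte stream (UART style transports, for example):
 * the packet type is read from the first byte of every frame and a
 * single reassembly slot is used for the whole stream.
 */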
4186int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4187{
4188 int type;
4189 int rem = 0;
4190
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004191 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304192 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4193
4194 if (!skb) {
4195 struct { char type; } *pkt;
4196
4197 /* Start of the frame */
4198 pkt = data;
4199 type = pkt->type;
4200
4201 data++;
4202 count--;
4203 } else
4204 type = bt_cb(skb)->pkt_type;
4205
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004206 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004207 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304208 if (rem < 0)
4209 return rem;
4210
4211 data += (count - rem);
4212 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004213 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304214
4215 return rem;
4216}
4217EXPORT_SYMBOL(hci_recv_stream_fragment);
4218
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219/* ---- Interface to upper protocols ---- */
4220
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221int hci_register_cb(struct hci_cb *cb)
4222{
4223 BT_DBG("%p name %s", cb, cb->name);
4224
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004225 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004227 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228
4229 return 0;
4230}
4231EXPORT_SYMBOL(hci_register_cb);
4232
4233int hci_unregister_cb(struct hci_cb *cb)
4234{
4235 BT_DBG("%p name %s", cb, cb->name);
4236
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004237 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004238 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004239 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240
4241 return 0;
4242}
4243EXPORT_SYMBOL(hci_unregister_cb);
4244
Marcel Holtmann51086992013-10-10 14:54:19 -07004245static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004247 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004249 /* Time stamp */
4250 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004252 /* Send copy to monitor */
4253 hci_send_to_monitor(hdev, skb);
4254
4255 if (atomic_read(&hdev->promisc)) {
4256 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004257 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258 }
4259
4260 /* Get rid of skb owner, prior to sending to the driver. */
4261 skb_orphan(skb);
4262
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004263 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004264 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265}
4266
Johan Hedberg3119ae92013-03-05 20:37:44 +02004267void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4268{
4269 skb_queue_head_init(&req->cmd_q);
4270 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004271 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004272}
4273
4274int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4275{
4276 struct hci_dev *hdev = req->hdev;
4277 struct sk_buff *skb;
4278 unsigned long flags;
4279
4280 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4281
Andre Guedes5d73e032013-03-08 11:20:16 -03004282	/* If an error occurred during request building, remove all HCI
4283 * commands queued on the HCI request queue.
4284 */
4285 if (req->err) {
4286 skb_queue_purge(&req->cmd_q);
4287 return req->err;
4288 }
4289
Johan Hedberg3119ae92013-03-05 20:37:44 +02004290 /* Do not allow empty requests */
4291 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004292 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004293
4294 skb = skb_peek_tail(&req->cmd_q);
4295 bt_cb(skb)->req.complete = complete;
4296
4297 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4298 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4299 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4300
4301 queue_work(hdev->workqueue, &hdev->cmd_work);
4302
4303 return 0;
4304}
4305
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004306static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004307 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308{
4309 int len = HCI_COMMAND_HDR_SIZE + plen;
4310 struct hci_command_hdr *hdr;
4311 struct sk_buff *skb;
4312
Linus Torvalds1da177e2005-04-16 15:20:36 -07004313 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004314 if (!skb)
4315 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
4317 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004318 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 hdr->plen = plen;
4320
4321 if (plen)
4322 memcpy(skb_put(skb, plen), param, plen);
4323
4324 BT_DBG("skb len %d", skb->len);
4325
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004326 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004327
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004328 return skb;
4329}
4330
4331/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004332int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4333 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004334{
4335 struct sk_buff *skb;
4336
4337 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4338
4339 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4340 if (!skb) {
4341 BT_ERR("%s no memory for command", hdev->name);
4342 return -ENOMEM;
4343 }
4344
Johan Hedberg11714b32013-03-05 20:37:47 +02004345	/* Stand-alone HCI commands must be flagged as
4346 * single-command requests.
4347 */
4348 bt_cb(skb)->req.start = true;
4349
Linus Torvalds1da177e2005-04-16 15:20:36 -07004350 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004351 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 return 0;
4354}
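/* Illustrative sketch (not part of this file): queueing a stand-alone
 * command. HCI_OP_WRITE_SCAN_ENABLE and SCAN_PAGE come from hci.h:
 *
 *	u8 scan = SCAN_PAGE;
 *	int err = hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
 *			       sizeof(scan), &scan);
 *	if (err < 0)
 *		BT_ERR("Failed to queue command (%d)", err);
 */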
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
Johan Hedberg71c76a12013-03-05 20:37:46 +02004356/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004357void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4358 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004359{
4360 struct hci_dev *hdev = req->hdev;
4361 struct sk_buff *skb;
4362
4363 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4364
Andre Guedes34739c12013-03-08 11:20:18 -03004365	/* If an error occurred during request building, there is no point in
4366 * queueing the HCI command. We can simply return.
4367 */
4368 if (req->err)
4369 return;
4370
Johan Hedberg71c76a12013-03-05 20:37:46 +02004371 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4372 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004373 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4374 hdev->name, opcode);
4375 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004376 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004377 }
4378
4379 if (skb_queue_empty(&req->cmd_q))
4380 bt_cb(skb)->req.start = true;
4381
Johan Hedberg02350a72013-04-03 21:50:29 +03004382 bt_cb(skb)->req.event = event;
4383
Johan Hedberg71c76a12013-03-05 20:37:46 +02004384 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004385}
4386
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004387void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4388 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004389{
4390 hci_req_add_ev(req, opcode, plen, param, 0);
4391}
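/* Illustrative sketch (not part of this file): building and running an
 * asynchronous request. Commands are queued with hci_req_add() (or
 * hci_req_add_ev() when completion is signalled by a specific event)
 * and submitted as one batch by hci_req_run(); my_req_complete is a
 * hypothetical callback matching hci_req_complete_t:
 *
 *	static void my_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	if (hci_req_run(&req, my_req_complete) < 0)
 *		BT_ERR("Failed to run request");
 */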
4392
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004394void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395{
4396 struct hci_command_hdr *hdr;
4397
4398 if (!hdev->sent_cmd)
4399 return NULL;
4400
4401 hdr = (void *) hdev->sent_cmd->data;
4402
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004403 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 return NULL;
4405
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004406 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407
4408 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4409}
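/* Illustrative sketch (not part of this file): event handlers use
 * hci_sent_cmd_data() to recover the parameters of the command an event
 * refers to, mirroring the pattern in hci_event.c:
 *
 *	void *sent;
 *	u8 param;
 *
 *	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (!sent)
 *		return;
 *	param = *((u8 *) sent);
 */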
4410
4411/* Send ACL data */
4412static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4413{
4414 struct hci_acl_hdr *hdr;
4415 int len = skb->len;
4416
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004417 skb_push(skb, HCI_ACL_HDR_SIZE);
4418 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004419 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004420 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4421 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422}
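/* A worked example of the packing above, assuming the hci.h helpers
 * hci_handle_pack(h, f) = (h & 0x0fff) | (f << 12) and ACL_START == 0x02:
 * for handle 0x002a sent with ACL_START flags,
 *
 *	hci_handle_pack(0x002a, 0x02) == 0x002a | 0x2000 == 0x202a
 *
 * i.e. the 12-bit connection handle sits in the low bits and the packet
 * boundary/broadcast flags in the top four bits. The receive path undoes
 * this with hci_handle() and hci_flags() in hci_acldata_packet() below.
 */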
4423
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004424static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004425 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004426{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004427 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004428 struct hci_dev *hdev = conn->hdev;
4429 struct sk_buff *list;
4430
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004431 skb->len = skb_headlen(skb);
4432 skb->data_len = 0;
4433
4434 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004435
4436 switch (hdev->dev_type) {
4437 case HCI_BREDR:
4438 hci_add_acl_hdr(skb, conn->handle, flags);
4439 break;
4440 case HCI_AMP:
4441 hci_add_acl_hdr(skb, chan->handle, flags);
4442 break;
4443 default:
4444 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4445 return;
4446 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004447
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004448 list = skb_shinfo(skb)->frag_list;
4449 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450		/* Non-fragmented */
4451 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4452
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004453 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 } else {
4455 /* Fragmented */
4456 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4457
4458 skb_shinfo(skb)->frag_list = NULL;
4459
4460 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004461 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004463 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004464
4465 flags &= ~ACL_START;
4466 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467 do {
4468 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004469
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004470 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004471 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472
4473 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4474
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004475 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 } while (list);
4477
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004478 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004480}
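/* Note on the fragmentation path above: only the first skb keeps the
 * caller's ACL_START flags; every fragment on the frag_list is
 * re-flagged with ACL_CONT before being queued, so the controller sees
 * one start packet followed by continuations on the same handle.
 */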
4481
4482void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4483{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004484 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004485
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004486 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004487
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004488 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004489
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004490 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004492
4493/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004494void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495{
4496 struct hci_dev *hdev = conn->hdev;
4497 struct hci_sco_hdr hdr;
4498
4499 BT_DBG("%s len %d", hdev->name, skb->len);
4500
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004501 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004502 hdr.dlen = skb->len;
4503
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004504 skb_push(skb, HCI_SCO_HDR_SIZE);
4505 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004506 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004508 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004509
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004511 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004512}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513
4514/* ---- HCI TX task (outgoing data) ---- */
4515
4516/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004517static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4518 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004519{
4520 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004521 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004522 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004523
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004524	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004525 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004526
4527 rcu_read_lock();
4528
4529 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004530 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004531 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004532
4533 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4534 continue;
4535
Linus Torvalds1da177e2005-04-16 15:20:36 -07004536 num++;
4537
4538 if (c->sent < min) {
4539 min = c->sent;
4540 conn = c;
4541 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004542
4543 if (hci_conn_num(hdev, type) == num)
4544 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004545 }
4546
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004547 rcu_read_unlock();
4548
Linus Torvalds1da177e2005-04-16 15:20:36 -07004549 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004550 int cnt, q;
4551
4552 switch (conn->type) {
4553 case ACL_LINK:
4554 cnt = hdev->acl_cnt;
4555 break;
4556 case SCO_LINK:
4557 case ESCO_LINK:
4558 cnt = hdev->sco_cnt;
4559 break;
4560 case LE_LINK:
4561 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4562 break;
4563 default:
4564 cnt = 0;
4565 BT_ERR("Unknown link type");
4566 }
4567
4568 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004569 *quote = q ? q : 1;
4570 } else
4571 *quote = 0;
4572
4573 BT_DBG("conn %p quote %d", conn, *quote);
4574 return conn;
4575}
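/* A worked example of the quota above: with hdev->acl_cnt == 9 and
 * three ACL connections holding queued data, the connection with the
 * smallest 'sent' count is picked and *quote = 9 / 3 == 3, so no single
 * link can monopolise the controller buffers in one scheduling pass.
 */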
4576
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004577static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004578{
4579 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004580 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004581
Ville Tervobae1f5d92011-02-10 22:38:53 -03004582 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004583
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004584 rcu_read_lock();
4585
Linus Torvalds1da177e2005-04-16 15:20:36 -07004586 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004587 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004588 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004589 BT_ERR("%s killing stalled connection %pMR",
4590 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004591 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004592 }
4593 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004594
4595 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004596}
4597
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004598static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4599 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004600{
4601 struct hci_conn_hash *h = &hdev->conn_hash;
4602 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004603 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004604 struct hci_conn *conn;
4605 int cnt, q, conn_num = 0;
4606
4607 BT_DBG("%s", hdev->name);
4608
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004609 rcu_read_lock();
4610
4611 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004612 struct hci_chan *tmp;
4613
4614 if (conn->type != type)
4615 continue;
4616
4617 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4618 continue;
4619
4620 conn_num++;
4621
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004622 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004623 struct sk_buff *skb;
4624
4625 if (skb_queue_empty(&tmp->data_q))
4626 continue;
4627
4628 skb = skb_peek(&tmp->data_q);
4629 if (skb->priority < cur_prio)
4630 continue;
4631
4632 if (skb->priority > cur_prio) {
4633 num = 0;
4634 min = ~0;
4635 cur_prio = skb->priority;
4636 }
4637
4638 num++;
4639
4640 if (conn->sent < min) {
4641 min = conn->sent;
4642 chan = tmp;
4643 }
4644 }
4645
4646 if (hci_conn_num(hdev, type) == conn_num)
4647 break;
4648 }
4649
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004650 rcu_read_unlock();
4651
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004652 if (!chan)
4653 return NULL;
4654
4655 switch (chan->conn->type) {
4656 case ACL_LINK:
4657 cnt = hdev->acl_cnt;
4658 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004659 case AMP_LINK:
4660 cnt = hdev->block_cnt;
4661 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004662 case SCO_LINK:
4663 case ESCO_LINK:
4664 cnt = hdev->sco_cnt;
4665 break;
4666 case LE_LINK:
4667 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4668 break;
4669 default:
4670 cnt = 0;
4671 BT_ERR("Unknown link type");
4672 }
4673
4674 q = cnt / num;
4675 *quote = q ? q : 1;
4676 BT_DBG("chan %p quote %d", chan, *quote);
4677 return chan;
4678}
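/* A worked example of the selection above: if the channels with queued
 * data have priorities 5 and 3, only the priority-5 channels compete in
 * this round; among them the one whose connection has the smallest
 * 'sent' count wins, and the quota is cnt divided by the number of
 * competing channels, exactly as in hci_low_sent().
 */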
4679
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004680static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4681{
4682 struct hci_conn_hash *h = &hdev->conn_hash;
4683 struct hci_conn *conn;
4684 int num = 0;
4685
4686 BT_DBG("%s", hdev->name);
4687
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004688 rcu_read_lock();
4689
4690 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004691 struct hci_chan *chan;
4692
4693 if (conn->type != type)
4694 continue;
4695
4696 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4697 continue;
4698
4699 num++;
4700
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004701 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004702 struct sk_buff *skb;
4703
4704 if (chan->sent) {
4705 chan->sent = 0;
4706 continue;
4707 }
4708
4709 if (skb_queue_empty(&chan->data_q))
4710 continue;
4711
4712 skb = skb_peek(&chan->data_q);
4713 if (skb->priority >= HCI_PRIO_MAX - 1)
4714 continue;
4715
4716 skb->priority = HCI_PRIO_MAX - 1;
4717
4718 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004719 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004720 }
4721
4722 if (hci_conn_num(hdev, type) == num)
4723 break;
4724 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004725
4726 rcu_read_unlock();
4727
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004728}
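/* Note on the recalculation above: channels that transmitted in the
 * last pass merely get their 'sent' count reset, while channels that
 * were starved have the skb at the head of their queue promoted to
 * HCI_PRIO_MAX - 1, so low-priority data cannot be starved indefinitely
 * by higher-priority channels.
 */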
4729
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004730static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4731{
4732 /* Calculate count of blocks used by this packet */
4733 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4734}
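/* A worked example of the block math above: with hdev->block_len == 256
 * and an skb carrying the 4-byte ACL header plus 1000 bytes of payload,
 * __get_blocks() returns DIV_ROUND_UP(1000, 256) == 4 blocks.
 */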
4735
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004736static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004737{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004738 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004739 /* ACL tx timeout must be longer than maximum
4740 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004741 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004742 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004743 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004744 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004745}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004746
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004747static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004748{
4749 unsigned int cnt = hdev->acl_cnt;
4750 struct hci_chan *chan;
4751 struct sk_buff *skb;
4752 int quote;
4753
4754 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004755
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004756 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004757 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004758 u32 priority = (skb_peek(&chan->data_q))->priority;
4759 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004760 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004761 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004762
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004763 /* Stop if priority has changed */
4764 if (skb->priority < priority)
4765 break;
4766
4767 skb = skb_dequeue(&chan->data_q);
4768
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004769 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004770 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004771
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004772 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004773 hdev->acl_last_tx = jiffies;
4774
4775 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004776 chan->sent++;
4777 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 }
4779 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004780
4781 if (cnt != hdev->acl_cnt)
4782 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783}
4784
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004785static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004786{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004787 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004788 struct hci_chan *chan;
4789 struct sk_buff *skb;
4790 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004791 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004792
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004793 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004794
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004795 BT_DBG("%s", hdev->name);
4796
4797 if (hdev->dev_type == HCI_AMP)
4798 type = AMP_LINK;
4799 else
4800 type = ACL_LINK;
4801
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004802 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004803 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004804 u32 priority = (skb_peek(&chan->data_q))->priority;
4805 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4806 int blocks;
4807
4808 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004809 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004810
4811 /* Stop if priority has changed */
4812 if (skb->priority < priority)
4813 break;
4814
4815 skb = skb_dequeue(&chan->data_q);
4816
4817 blocks = __get_blocks(hdev, skb);
4818 if (blocks > hdev->block_cnt)
4819 return;
4820
4821 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004822 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004823
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004824 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004825 hdev->acl_last_tx = jiffies;
4826
4827 hdev->block_cnt -= blocks;
4828 quote -= blocks;
4829
4830 chan->sent += blocks;
4831 chan->conn->sent += blocks;
4832 }
4833 }
4834
4835 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004836 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004837}
4838
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004839static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004840{
4841 BT_DBG("%s", hdev->name);
4842
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004843 /* No ACL link over BR/EDR controller */
4844 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4845 return;
4846
4847 /* No AMP link over AMP controller */
4848 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004849 return;
4850
4851 switch (hdev->flow_ctl_mode) {
4852 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4853 hci_sched_acl_pkt(hdev);
4854 break;
4855
4856 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4857 hci_sched_acl_blk(hdev);
4858 break;
4859 }
4860}
4861
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004863static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864{
4865 struct hci_conn *conn;
4866 struct sk_buff *skb;
4867 int quote;
4868
4869 BT_DBG("%s", hdev->name);
4870
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004871 if (!hci_conn_num(hdev, SCO_LINK))
4872 return;
4873
Linus Torvalds1da177e2005-04-16 15:20:36 -07004874 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4875 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4876 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004877 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004878
4879 conn->sent++;
4880 if (conn->sent == ~0)
4881 conn->sent = 0;
4882 }
4883 }
4884}
4885
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004886static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004887{
4888 struct hci_conn *conn;
4889 struct sk_buff *skb;
4890 int quote;
4891
4892 BT_DBG("%s", hdev->name);
4893
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004894 if (!hci_conn_num(hdev, ESCO_LINK))
4895 return;
4896
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004897 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4898 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004899 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4900 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004901 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004902
4903 conn->sent++;
4904 if (conn->sent == ~0)
4905 conn->sent = 0;
4906 }
4907 }
4908}
4909
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004910static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004911{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004912 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004913 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004914 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004915
4916 BT_DBG("%s", hdev->name);
4917
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004918 if (!hci_conn_num(hdev, LE_LINK))
4919 return;
4920
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004921 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004922 /* LE tx timeout must be longer than maximum
4923 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004924 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004925 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004926 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004927 }
4928
4929 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004930 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004931 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004932 u32 priority = (skb_peek(&chan->data_q))->priority;
4933 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004934 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004935 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004936
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004937 /* Stop if priority has changed */
4938 if (skb->priority < priority)
4939 break;
4940
4941 skb = skb_dequeue(&chan->data_q);
4942
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004943 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004944 hdev->le_last_tx = jiffies;
4945
4946 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004947 chan->sent++;
4948 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004949 }
4950 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004951
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004952 if (hdev->le_pkts)
4953 hdev->le_cnt = cnt;
4954 else
4955 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004956
4957 if (cnt != tmp)
4958 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004959}
4960
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004961static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004962{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004963 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004964 struct sk_buff *skb;
4965
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004966 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004967 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004968
Marcel Holtmann52de5992013-09-03 18:08:38 -07004969 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4970 /* Schedule queues and send stuff to HCI driver */
4971 hci_sched_acl(hdev);
4972 hci_sched_sco(hdev);
4973 hci_sched_esco(hdev);
4974 hci_sched_le(hdev);
4975 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004976
Linus Torvalds1da177e2005-04-16 15:20:36 -07004977 /* Send next queued raw (unknown type) packet */
4978 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004979 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004980}
4981
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004982/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004983
4984/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004985static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986{
4987 struct hci_acl_hdr *hdr = (void *) skb->data;
4988 struct hci_conn *conn;
4989 __u16 handle, flags;
4990
4991 skb_pull(skb, HCI_ACL_HDR_SIZE);
4992
4993 handle = __le16_to_cpu(hdr->handle);
4994 flags = hci_flags(handle);
4995 handle = hci_handle(handle);
4996
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004997 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004998 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004999
5000 hdev->stat.acl_rx++;
5001
5002 hci_dev_lock(hdev);
5003 conn = hci_conn_hash_lookup_handle(hdev, handle);
5004 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005005
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08005007 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02005008
Linus Torvalds1da177e2005-04-16 15:20:36 -07005009 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005010 l2cap_recv_acldata(conn, skb, flags);
5011 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005013 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005014 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015 }
5016
5017 kfree_skb(skb);
5018}
5019
5020/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03005021static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005022{
5023 struct hci_sco_hdr *hdr = (void *) skb->data;
5024 struct hci_conn *conn;
5025 __u16 handle;
5026
5027 skb_pull(skb, HCI_SCO_HDR_SIZE);
5028
5029 handle = __le16_to_cpu(hdr->handle);
5030
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03005031 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
5033 hdev->stat.sco_rx++;
5034
5035 hci_dev_lock(hdev);
5036 conn = hci_conn_hash_lookup_handle(hdev, handle);
5037 hci_dev_unlock(hdev);
5038
5039 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005040 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005041 sco_recv_scodata(conn, skb);
5042 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005043 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005044 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005045 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005046 }
5047
5048 kfree_skb(skb);
5049}
5050
Johan Hedberg9238f362013-03-05 20:37:48 +02005051static bool hci_req_is_complete(struct hci_dev *hdev)
5052{
5053 struct sk_buff *skb;
5054
5055 skb = skb_peek(&hdev->cmd_q);
5056 if (!skb)
5057 return true;
5058
5059 return bt_cb(skb)->req.start;
5060}
5061
Johan Hedberg42c6b122013-03-05 20:37:49 +02005062static void hci_resend_last(struct hci_dev *hdev)
5063{
5064 struct hci_command_hdr *sent;
5065 struct sk_buff *skb;
5066 u16 opcode;
5067
5068 if (!hdev->sent_cmd)
5069 return;
5070
5071 sent = (void *) hdev->sent_cmd->data;
5072 opcode = __le16_to_cpu(sent->opcode);
5073 if (opcode == HCI_OP_RESET)
5074 return;
5075
5076 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5077 if (!skb)
5078 return;
5079
5080 skb_queue_head(&hdev->cmd_q, skb);
5081 queue_work(hdev->workqueue, &hdev->cmd_work);
5082}
5083
Johan Hedberg9238f362013-03-05 20:37:48 +02005084void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5085{
5086 hci_req_complete_t req_complete = NULL;
5087 struct sk_buff *skb;
5088 unsigned long flags;
5089
5090 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5091
Johan Hedberg42c6b122013-03-05 20:37:49 +02005092 /* If the completed command doesn't match the last one that was
5093	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005094 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005095 if (!hci_sent_cmd_data(hdev, opcode)) {
5096 /* Some CSR based controllers generate a spontaneous
5097 * reset complete event during init and any pending
5098 * command will never be completed. In such a case we
5099 * need to resend whatever was the last sent
5100 * command.
5101 */
5102 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5103 hci_resend_last(hdev);
5104
Johan Hedberg9238f362013-03-05 20:37:48 +02005105 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005106 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005107
5108	/* If the command succeeded and there are still more commands in
5109	 * this request, the request is not yet complete.
5110 */
5111 if (!status && !hci_req_is_complete(hdev))
5112 return;
5113
5114	/* If this was the last command in a request, the complete
5115 * callback would be found in hdev->sent_cmd instead of the
5116 * command queue (hdev->cmd_q).
5117 */
5118 if (hdev->sent_cmd) {
5119 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005120
5121 if (req_complete) {
5122 /* We must set the complete callback to NULL to
5123 * avoid calling the callback more than once if
5124 * this function gets called again.
5125 */
5126 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5127
Johan Hedberg9238f362013-03-05 20:37:48 +02005128 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005129 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005130 }
5131
5132 /* Remove all pending commands belonging to this request */
5133 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5134 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5135 if (bt_cb(skb)->req.start) {
5136 __skb_queue_head(&hdev->cmd_q, skb);
5137 break;
5138 }
5139
5140 req_complete = bt_cb(skb)->req.complete;
5141 kfree_skb(skb);
5142 }
5143 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5144
5145call_complete:
5146 if (req_complete)
5147 req_complete(hdev, status);
5148}
5149
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005150static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005151{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005152 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005153 struct sk_buff *skb;
5154
5155 BT_DBG("%s", hdev->name);
5156
Linus Torvalds1da177e2005-04-16 15:20:36 -07005157 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005158 /* Send copy to monitor */
5159 hci_send_to_monitor(hdev, skb);
5160
Linus Torvalds1da177e2005-04-16 15:20:36 -07005161 if (atomic_read(&hdev->promisc)) {
5162 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005163 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005164 }
5165
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005166 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 kfree_skb(skb);
5168 continue;
5169 }
5170
5171 if (test_bit(HCI_INIT, &hdev->flags)) {
5172			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005173 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005174 case HCI_ACLDATA_PKT:
5175 case HCI_SCODATA_PKT:
5176 kfree_skb(skb);
5177 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005179 }
5180
5181 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005182 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005184 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005185 hci_event_packet(hdev, skb);
5186 break;
5187
5188 case HCI_ACLDATA_PKT:
5189 BT_DBG("%s ACL data packet", hdev->name);
5190 hci_acldata_packet(hdev, skb);
5191 break;
5192
5193 case HCI_SCODATA_PKT:
5194 BT_DBG("%s SCO data packet", hdev->name);
5195 hci_scodata_packet(hdev, skb);
5196 break;
5197
5198 default:
5199 kfree_skb(skb);
5200 break;
5201 }
5202 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005203}
5204
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005205static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005206{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005207 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 struct sk_buff *skb;
5209
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005210 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5211 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005212
Linus Torvalds1da177e2005-04-16 15:20:36 -07005213 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005214 if (atomic_read(&hdev->cmd_cnt)) {
5215 skb = skb_dequeue(&hdev->cmd_q);
5216 if (!skb)
5217 return;
5218
Wei Yongjun7585b972009-02-25 18:29:52 +08005219 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005220
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005221 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005222 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005223 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005224 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005225 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005226 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005227 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005228 schedule_delayed_work(&hdev->cmd_timer,
5229 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005230 } else {
5231 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005232 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005233 }
5234 }
5235}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005236
5237void hci_req_add_le_scan_disable(struct hci_request *req)
5238{
5239 struct hci_cp_le_set_scan_enable cp;
5240
5241 memset(&cp, 0, sizeof(cp));
5242 cp.enable = LE_SCAN_DISABLE;
5243 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5244}
Andre Guedesa4790db2014-02-26 20:21:47 -03005245
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005246void hci_req_add_le_passive_scan(struct hci_request *req)
5247{
5248 struct hci_cp_le_set_scan_param param_cp;
5249 struct hci_cp_le_set_scan_enable enable_cp;
5250 struct hci_dev *hdev = req->hdev;
5251 u8 own_addr_type;
5252
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005253	/* Set require_privacy to false since no SCAN_REQ are sent
5254 * during passive scanning. Not using an unresolvable address
5255 * here is important so that peer devices using direct
5256 * advertising with our address will be correctly reported
5257 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005258 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005259 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005260 return;
5261
5262 memset(&param_cp, 0, sizeof(param_cp));
5263 param_cp.type = LE_SCAN_PASSIVE;
5264 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5265 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5266 param_cp.own_address_type = own_addr_type;
5267 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5268 &param_cp);
5269
5270 memset(&enable_cp, 0, sizeof(enable_cp));
5271 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005272 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005273 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5274 &enable_cp);
5275}
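/* A worked example of the scan parameters above: the controller takes
 * interval and window in units of 0.625 ms, so with the defaults set up
 * in hci_alloc_dev() (le_scan_interval 0x0060, le_scan_window 0x0030)
 * the controller listens for 48 * 0.625 == 30 ms out of every
 * 96 * 0.625 == 60 ms.
 */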
5276
Andre Guedesa4790db2014-02-26 20:21:47 -03005277static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5278{
5279 if (status)
5280 BT_DBG("HCI request failed to update background scanning: "
5281 "status 0x%2.2x", status);
5282}
5283
5284/* This function controls the background scanning based on hdev->pend_le_conns
5285 * list. If there are pending LE connections, we start background scanning;
5286 * otherwise we stop it.
5287 *
5288 * This function requires the caller holds hdev->lock.
5289 */
5290void hci_update_background_scan(struct hci_dev *hdev)
5291{
Andre Guedesa4790db2014-02-26 20:21:47 -03005292 struct hci_request req;
5293 struct hci_conn *conn;
5294 int err;
5295
5296 hci_req_init(&req, hdev);
5297
5298 if (list_empty(&hdev->pend_le_conns)) {
5299		/* If there are no pending LE connections, we should stop
5300 * the background scanning.
5301 */
5302
5303 /* If controller is not scanning we are done. */
5304 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5305 return;
5306
5307 hci_req_add_le_scan_disable(&req);
5308
5309 BT_DBG("%s stopping background scanning", hdev->name);
5310 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005311 /* If there is at least one pending LE connection, we should
5312 * keep the background scan running.
5313 */
5314
Andre Guedesa4790db2014-02-26 20:21:47 -03005315 /* If controller is connecting, we should not start scanning
5316 * since some controllers are not able to scan and connect at
5317 * the same time.
5318 */
5319 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5320 if (conn)
5321 return;
5322
Andre Guedes4340a122014-03-10 18:26:24 -03005323 /* If controller is currently scanning, we stop it to ensure we
5324 * don't miss any advertising (due to duplicates filter).
5325 */
5326 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5327 hci_req_add_le_scan_disable(&req);
5328
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005329 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005330
5331 BT_DBG("%s starting background scanning", hdev->name);
5332 }
5333
5334 err = hci_req_run(&req, update_background_scan_complete);
5335 if (err)
5336 BT_ERR("Failed to run HCI request: err %d", err);
5337}
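/* Illustrative sketch (not part of this file): as noted above, callers
 * must hold hdev->lock, so a typical invocation looks like:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */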