/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

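/* The dut_mode entry below follows the common debugfs boolean pattern
 * used throughout this file: reads report the flag as 'Y' or 'N',
 * writes parse a boolean with strtobool() and then issue the matching
 * HCI command synchronously while holding the request lock.
 */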
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

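/* Dump the LMP features of every supported page, plus the LE feature
 * page when the controller is LE capable.
 */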
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

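/* One line per inquiry cache entry: address, page scan parameters,
 * class of device, clock offset, RSSI, SSP mode and timestamp.
 */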
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

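/* The numeric entries from here on use DEFINE_SIMPLE_ATTRIBUTE: a pair
 * of u64 get/set callbacks plus a printf format that debugfs wraps into
 * file operations. Setters, where present, update hdev under
 * hci_dev_lock(), typically after range-checking the value.
 */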
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

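/* The idle timeout is in milliseconds: zero disables it, otherwise it
 * must lie between 500 ms and one hour (3600000 ms).
 */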
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

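/* Sniff intervals are expressed in baseband slots (0.625 ms). The
 * setters require a non-zero, even value and keep the invariant
 * sniff_min_interval <= sniff_max_interval.
 */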
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

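/* Expose the current identity: the public or static random address and
 * its type, the local IRK and the last generated RPA.
 */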
static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

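/* Like force_sc_support, this flag may only be flipped while the
 * controller is down; once HCI_UP is set the write handler returns
 * -EBUSY.
 */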
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

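/* LE connection intervals are in units of 1.25 ms and must stay within
 * the 0x0006-0x0c80 range allowed by the specification, with
 * conn_min_interval never exceeding conn_max_interval.
 */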
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

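/* Synchronous requests park the caller on hdev->req_wait_q and use
 * req_status/req_result to hand the outcome back from the completion
 * and cancel callbacks below.
 */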
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

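/* Helpers for sending a single HCI command and synchronously waiting
 * for its result. A minimal usage sketch (hypothetical caller, on a
 * device that is up, mirroring dut_mode_write() above):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	err = -bt_to_errno(skb->data[0]);
 *	kfree_skb(skb);
 *
 * The _ev variant waits for a specific event instead of the generic
 * Command Complete.
 */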
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

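/* Pick the inquiry mode the controller actually supports: 0x02 for
 * extended inquiry results, 0x01 for inquiry with RSSI, 0x00 for the
 * standard mode. The manufacturer/revision special cases cover
 * controllers whose feature bits do not match their actual inquiry
 * support.
 */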
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

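/* Build the Set Event Mask parameter: an 8-byte bitmap where each
 * capability check below switches on only the events the controller
 * can actually generate.
 */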
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should
		 * also be available. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled.
		 * To achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

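/* Advertise the default link policy: every mode the controller's LMP
 * features say it can handle (role switch, hold, sniff, park).
 */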
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

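/* Third init stage: commands that depend on the feature and command
 * bitmaps collected during the earlier stages.
 */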
Johan Hedberg42c6b122013-03-05 20:37:49 +02001556static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +02001557{
Johan Hedberg42c6b122013-03-05 20:37:49 +02001558 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001559 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +02001560
Gustavo Padovanb8f4e062013-06-13 12:34:31 +01001561 /* Some Broadcom based Bluetooth controllers do not support the
1562 * Delete Stored Link Key command. They are clearly indicating its
1563 * absence in the bit mask of supported commands.
1564 *
 1565	 * Check the supported commands and send the command only if it
 1566	 * is marked as supported. If not supported, assume that the
 1567	 * controller does not have actual support for stored link keys,
 1568	 * which makes this command redundant anyway.
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001569	 *
 1570	 * Some controllers indicate that they support the deletion of
 1571	 * stored link keys, but they don't. The quirk lets a driver
 1572	 * simply disable this command.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -07001573 */
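	/* Illustrative sketch (not from the original source): a driver
	 * that knows the advertised Delete Stored Link Key support is
	 * broken would typically set the quirk in its probe path before
	 * registration, e.g.:
	 *
	 *	set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
	 *	err = hci_register_dev(hdev);
	 */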
Marcel Holtmannf9f462f2014-01-03 03:02:35 -08001574 if (hdev->commands[6] & 0x80 &&
1575 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
Johan Hedberg59f45d52013-06-13 11:01:13 +03001576 struct hci_cp_delete_stored_link_key cp;
1577
1578 bacpy(&cp.bdaddr, BDADDR_ANY);
1579 cp.delete_all = 0x01;
1580 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1581 sizeof(cp), &cp);
1582 }
1583
Johan Hedberg2177bab2013-03-05 20:37:43 +02001584 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +02001585 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +02001586
Johan Hedberg7bf32042014-02-23 19:42:29 +02001587 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +02001588 hci_set_le_support(req);
Johan Hedbergd2c5d772013-04-17 15:00:52 +03001589
1590 /* Read features beyond page 1 if available */
1591 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1592 struct hci_cp_read_local_ext_features cp;
1593
1594 cp.page = p;
1595 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1596 sizeof(cp), &cp);
1597 }
Johan Hedberg2177bab2013-03-05 20:37:43 +02001598}
1599
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001600static void hci_init4_req(struct hci_request *req, unsigned long opt)
1601{
1602 struct hci_dev *hdev = req->hdev;
1603
Johan Hedbergd62e6d62013-09-13 11:40:02 +03001604 /* Set event mask page 2 if the HCI command for it is supported */
1605 if (hdev->commands[22] & 0x04)
1606 hci_set_event_mask_page_2(req);
1607
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001608 /* Check for Synchronization Train support */
Marcel Holtmann53b834d22013-12-08 11:55:33 -08001609 if (lmp_sync_train_capable(hdev))
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001610 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001611
1612 /* Enable Secure Connections if supported and configured */
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001613 if ((lmp_sc_capable(hdev) ||
Marcel Holtmann111902f2014-06-21 04:53:17 +02001614 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
Marcel Holtmanna6d0d692014-01-10 02:07:24 -08001615 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1616 u8 support = 0x01;
1617 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1618 sizeof(support), &support);
1619 }
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001620}
1621
Johan Hedberg2177bab2013-03-05 20:37:43 +02001622static int __hci_init(struct hci_dev *hdev)
1623{
1624 int err;
1625
1626 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1627 if (err < 0)
1628 return err;
1629
Marcel Holtmann4b4148e2013-10-19 07:09:12 -07001630 /* The Device Under Test (DUT) mode is special and available for
1631 * all controller types. So just create it early on.
1632 */
1633 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1634 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1635 &dut_mode_fops);
1636 }
1637
Johan Hedberg2177bab2013-03-05 20:37:43 +02001638	/* The HCI_BREDR device type covers single-mode LE, single-mode
 1639	 * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
 1640	 * only need the first stage init.
1641 */
1642 if (hdev->dev_type != HCI_BREDR)
1643 return 0;
1644
1645 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1646 if (err < 0)
1647 return err;
1648
Johan Hedberg5d4e7e82013-09-13 11:40:01 +03001649 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1650 if (err < 0)
1651 return err;
1652
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001653 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1654 if (err < 0)
1655 return err;
1656
1657 /* Only create debugfs entries during the initial setup
1658 * phase and not every time the controller gets powered on.
1659 */
1660 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1661 return 0;
1662
Marcel Holtmanndfb826a2013-10-18 12:04:46 -07001663 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1664 &features_fops);
Marcel Holtmannceeb3bc2013-10-18 12:04:49 -07001665 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1666 &hdev->manufacturer);
1667 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1668 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
Marcel Holtmann70afe0b2013-10-17 17:24:14 -07001669 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1670 &blacklist_fops);
Marcel Holtmann47219832013-10-17 17:24:15 -07001671 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1672
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02001673 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1674 &conn_info_min_age_fops);
1675 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1676 &conn_info_max_age_fops);
1677
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001678 if (lmp_bredr_capable(hdev)) {
1679 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1680 hdev, &inquiry_cache_fops);
Marcel Holtmann02d08d12013-10-18 12:04:52 -07001681 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1682 hdev, &link_keys_fops);
Marcel Holtmannbabdbb32013-10-18 12:04:51 -07001683 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1684 hdev, &dev_class_fops);
Marcel Holtmann041000b2013-10-17 12:02:31 -07001685 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1686 hdev, &voice_setting_fops);
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001687 }
1688
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001689 if (lmp_ssp_capable(hdev)) {
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001690 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1691 hdev, &auto_accept_delay_fops);
Marcel Holtmann5afeac142014-01-10 02:07:27 -08001692 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1693 hdev, &force_sc_support_fops);
Marcel Holtmann134c2a82014-01-15 22:37:42 -08001694 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1695 hdev, &sc_only_mode_fops);
Marcel Holtmann06f5b772013-10-19 07:09:11 -07001696 }
Marcel Holtmannebd1e332013-10-17 10:54:46 -07001697
Marcel Holtmann2bfa3532013-10-17 19:16:02 -07001698 if (lmp_sniff_capable(hdev)) {
1699 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1700 hdev, &idle_timeout_fops);
1701 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1702 hdev, &sniff_min_interval_fops);
1703 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1704 hdev, &sniff_max_interval_fops);
1705 }
1706
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001707 if (lmp_le_capable(hdev)) {
Marcel Holtmannac345812014-02-23 12:44:25 -08001708 debugfs_create_file("identity", 0400, hdev->debugfs,
1709 hdev, &identity_fops);
1710 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1711 hdev, &rpa_timeout_fops);
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08001712 debugfs_create_file("random_address", 0444, hdev->debugfs,
1713 hdev, &random_address_fops);
Marcel Holtmannb32bba62014-02-19 19:31:26 -08001714 debugfs_create_file("static_address", 0444, hdev->debugfs,
1715 hdev, &static_address_fops);
1716
1717 /* For controllers with a public address, provide a debug
1718 * option to force the usage of the configured static
1719 * address. By default the public address is used.
1720 */
1721 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1722 debugfs_create_file("force_static_address", 0644,
1723 hdev->debugfs, hdev,
1724 &force_static_address_fops);
1725
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001726 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1727 &hdev->le_white_list_size);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08001728 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1729 &white_list_fops);
Marcel Holtmann3698d702014-02-18 21:54:49 -08001730 debugfs_create_file("identity_resolving_keys", 0400,
1731 hdev->debugfs, hdev,
1732 &identity_resolving_keys_fops);
Marcel Holtmann8f8625c2013-10-18 15:56:57 -07001733 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1734 hdev, &long_term_keys_fops);
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07001735 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1736 hdev, &conn_min_interval_fops);
1737 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1738 hdev, &conn_max_interval_fops);
Marcel Holtmann816a93d2014-06-30 12:34:37 +02001739 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1740 hdev, &conn_latency_fops);
Marcel Holtmann3f959d42014-02-20 11:55:56 -08001741 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1742 hdev, &adv_channel_map_fops);
Marcel Holtmann0b3c7d32014-06-29 16:15:49 +02001743 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1744 &device_list_fops);
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01001745 debugfs_create_u16("discov_interleaved_timeout", 0644,
1746 hdev->debugfs,
1747 &hdev->discov_interleaved_timeout);
Marcel Holtmannd0f729b2013-10-18 15:23:46 -07001748 }
Marcel Holtmanne7b8fc92013-10-17 11:45:09 -07001749
Marcel Holtmannbaf27f62013-10-16 03:28:55 -07001750 return 0;
Johan Hedberg2177bab2013-03-05 20:37:43 +02001751}
1752
Johan Hedberg42c6b122013-03-05 20:37:49 +02001753static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754{
1755 __u8 scan = opt;
1756
Johan Hedberg42c6b122013-03-05 20:37:49 +02001757 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758
1759 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001760 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761}
1762
Johan Hedberg42c6b122013-03-05 20:37:49 +02001763static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764{
1765 __u8 auth = opt;
1766
Johan Hedberg42c6b122013-03-05 20:37:49 +02001767 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
1769 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001770 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771}
1772
Johan Hedberg42c6b122013-03-05 20:37:49 +02001773static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774{
1775 __u8 encrypt = opt;
1776
Johan Hedberg42c6b122013-03-05 20:37:49 +02001777 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001779 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001780 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781}
1782
Johan Hedberg42c6b122013-03-05 20:37:49 +02001783static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001784{
1785 __le16 policy = cpu_to_le16(opt);
1786
Johan Hedberg42c6b122013-03-05 20:37:49 +02001787 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001788
1789 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +02001790 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001791}
1792
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001793/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 * Device is held on return. */
1795struct hci_dev *hci_dev_get(int index)
1796{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001797 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798
1799 BT_DBG("%d", index);
1800
1801 if (index < 0)
1802 return NULL;
1803
1804 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001805 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 if (d->id == index) {
1807 hdev = hci_dev_hold(d);
1808 break;
1809 }
1810 }
1811 read_unlock(&hci_dev_list_lock);
1812 return hdev;
1813}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +02001816
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001817bool hci_discovery_active(struct hci_dev *hdev)
1818{
1819 struct discovery_state *discov = &hdev->discovery;
1820
Andre Guedes6fbe1952012-02-03 17:47:58 -03001821 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -03001822 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -03001823 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001824 return true;
1825
Andre Guedes6fbe1952012-02-03 17:47:58 -03001826 default:
1827 return false;
1828 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001829}
1830
Johan Hedbergff9ef572012-01-04 14:23:45 +02001831void hci_discovery_set_state(struct hci_dev *hdev, int state)
1832{
1833 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1834
1835 if (hdev->discovery.state == state)
1836 return;
1837
1838 switch (state) {
1839 case DISCOVERY_STOPPED:
Andre Guedesc54c3862014-02-26 20:21:50 -03001840 hci_update_background_scan(hdev);
1841
Andre Guedes7b99b652012-02-13 15:41:02 -03001842 if (hdev->discovery.state != DISCOVERY_STARTING)
1843 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +02001844 break;
1845 case DISCOVERY_STARTING:
1846 break;
Andre Guedes343f9352012-02-17 20:39:37 -03001847 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +02001848 mgmt_discovering(hdev, 1);
1849 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001850 case DISCOVERY_RESOLVING:
1851 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +02001852 case DISCOVERY_STOPPING:
1853 break;
1854 }
1855
1856 hdev->discovery.state = state;
1857}
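/* Reading aid (not part of the original source): a typical successful
 * discovery session walks the states as
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING ->
 *	DISCOVERY_RESOLVING -> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * with mgmt_discovering(hdev, 1) signalled on entering FINDING and
 * mgmt_discovering(hdev, 0) once the state falls back to STOPPED
 * (unless discovery never made it past STARTING).
 */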
1858
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001859void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001860{
Johan Hedberg30883512012-01-04 14:16:21 +02001861 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001862 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863
Johan Hedberg561aafb2012-01-04 13:31:59 +02001864 list_for_each_entry_safe(p, n, &cache->all, all) {
1865 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001866 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 }
Johan Hedberg561aafb2012-01-04 13:31:59 +02001868
1869 INIT_LIST_HEAD(&cache->unknown);
1870 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871}
1872
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001873struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1874 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875{
Johan Hedberg30883512012-01-04 14:16:21 +02001876 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 struct inquiry_entry *e;
1878
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001879 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880
Johan Hedberg561aafb2012-01-04 13:31:59 +02001881 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +02001883 return e;
1884 }
1885
1886 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887}
1888
Johan Hedberg561aafb2012-01-04 13:31:59 +02001889struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001890 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +02001891{
Johan Hedberg30883512012-01-04 14:16:21 +02001892 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001893 struct inquiry_entry *e;
1894
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001895 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +02001896
1897 list_for_each_entry(e, &cache->unknown, list) {
1898 if (!bacmp(&e->data.bdaddr, bdaddr))
1899 return e;
1900 }
1901
1902 return NULL;
1903}
1904
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001905struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001906 bdaddr_t *bdaddr,
1907 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001908{
1909 struct discovery_state *cache = &hdev->discovery;
1910 struct inquiry_entry *e;
1911
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001912 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +02001913
1914 list_for_each_entry(e, &cache->resolve, list) {
1915 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1916 return e;
1917 if (!bacmp(&e->data.bdaddr, bdaddr))
1918 return e;
1919 }
1920
1921 return NULL;
1922}
1923
Johan Hedberga3d4e202012-01-09 00:53:02 +02001924void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001925 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +02001926{
1927 struct discovery_state *cache = &hdev->discovery;
1928 struct list_head *pos = &cache->resolve;
1929 struct inquiry_entry *p;
1930
1931 list_del(&ie->list);
1932
1933 list_for_each_entry(p, &cache->resolve, list) {
1934 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001935 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +02001936 break;
1937 pos = &p->list;
1938 }
1939
1940 list_add(&ie->list, pos);
1941}
1942
Johan Hedberg31754052012-01-04 13:39:52 +02001943bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001944 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
Johan Hedberg30883512012-01-04 14:16:21 +02001946 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001947 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001949 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
Szymon Janc2b2fec42012-11-20 11:38:54 +01001951 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1952
Johan Hedberg01735bb2014-03-25 12:06:18 +02001953 *ssp = data->ssp_mode;
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001954
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001955 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +02001956 if (ie) {
Johan Hedberg8002d772014-03-27 13:51:24 +02001957 if (ie->data.ssp_mode)
Johan Hedberg388fc8f2012-02-23 00:38:59 +02001958 *ssp = true;
1959
Johan Hedberga3d4e202012-01-09 00:53:02 +02001960 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001961 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +02001962 ie->data.rssi = data->rssi;
1963 hci_inquiry_cache_update_resolve(hdev, ie);
1964 }
1965
Johan Hedberg561aafb2012-01-04 13:31:59 +02001966 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +02001967 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001968
Johan Hedberg561aafb2012-01-04 13:31:59 +02001969 /* Entry not in the cache. Add new one. */
1970 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1971 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +02001972 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +02001973
1974 list_add(&ie->all, &cache->all);
1975
1976 if (name_known) {
1977 ie->name_state = NAME_KNOWN;
1978 } else {
1979 ie->name_state = NAME_NOT_KNOWN;
1980 list_add(&ie->list, &cache->unknown);
1981 }
1982
1983update:
1984 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001985 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +02001986 ie->name_state = NAME_KNOWN;
1987 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 }
1989
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001990 memcpy(&ie->data, data, sizeof(*data));
1991 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +02001993
1994 if (ie->name_state == NAME_NOT_KNOWN)
1995 return false;
1996
1997 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998}
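/* Illustrative walk-through (derived from the code above): the first
 * inquiry result for a device with name_known == false creates an
 * entry on cache->all and cache->unknown and the function returns
 * false, prompting a remote name request. When the resolved result is
 * fed back with name_known == true, the entry switches to NAME_KNOWN,
 * leaves the unknown list and the function returns true.
 */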
1999
2000static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2001{
Johan Hedberg30883512012-01-04 14:16:21 +02002002 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 struct inquiry_info *info = (struct inquiry_info *) buf;
2004 struct inquiry_entry *e;
2005 int copied = 0;
2006
Johan Hedberg561aafb2012-01-04 13:31:59 +02002007 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002009
2010 if (copied >= num)
2011 break;
2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 bacpy(&info->bdaddr, &data->bdaddr);
2014 info->pscan_rep_mode = data->pscan_rep_mode;
2015 info->pscan_period_mode = data->pscan_period_mode;
2016 info->pscan_mode = data->pscan_mode;
2017 memcpy(info->dev_class, data->dev_class, 3);
2018 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002019
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +02002021 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022 }
2023
2024 BT_DBG("cache %p, copied %d", cache, copied);
2025 return copied;
2026}
2027
Johan Hedberg42c6b122013-03-05 20:37:49 +02002028static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029{
2030 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002031 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032 struct hci_cp_inquiry cp;
2033
2034 BT_DBG("%s", hdev->name);
2035
2036 if (test_bit(HCI_INQUIRY, &hdev->flags))
2037 return;
2038
2039 /* Start Inquiry */
2040 memcpy(&cp.lap, &ir->lap, 3);
2041 cp.length = ir->length;
2042 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +02002043 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044}
2045
Andre Guedes3e13fa12013-03-27 20:04:56 -03002046static int wait_inquiry(void *word)
2047{
2048 schedule();
2049 return signal_pending(current);
2050}
2051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052int hci_inquiry(void __user *arg)
2053{
2054 __u8 __user *ptr = arg;
2055 struct hci_inquiry_req ir;
2056 struct hci_dev *hdev;
2057 int err = 0, do_inquiry = 0, max_rsp;
2058 long timeo;
2059 __u8 *buf;
2060
2061 if (copy_from_user(&ir, ptr, sizeof(ir)))
2062 return -EFAULT;
2063
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002064 hdev = hci_dev_get(ir.dev_id);
2065 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 return -ENODEV;
2067
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002068 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2069 err = -EBUSY;
2070 goto done;
2071 }
2072
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002073 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2074 err = -EOPNOTSUPP;
2075 goto done;
2076 }
2077
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002078 if (hdev->dev_type != HCI_BREDR) {
2079 err = -EOPNOTSUPP;
2080 goto done;
2081 }
2082
Johan Hedberg56f87902013-10-02 13:43:13 +03002083 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2084 err = -EOPNOTSUPP;
2085 goto done;
2086 }
2087
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002088 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002089 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002090 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002091 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 do_inquiry = 1;
2093 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002094 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095
Marcel Holtmann04837f62006-07-03 10:02:33 +02002096 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002097
2098 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +02002099 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2100 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002101 if (err < 0)
2102 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03002103
2104 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2105 * cleared). If it is interrupted by a signal, return -EINTR.
2106 */
2107 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2108 TASK_INTERRUPTIBLE))
2109 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002112	/* For an unlimited number of responses, use a buffer with
 2113	 * 255 entries.
2114 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2116
 2117	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
 2118	 * and then copy it to user space.
2119 */
Szymon Janc01df8c32011-02-17 16:46:47 +01002120 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002121 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 err = -ENOMEM;
2123 goto done;
2124 }
2125
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002126 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002128 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130 BT_DBG("num_rsp %d", ir.num_rsp);
2131
2132 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2133 ptr += sizeof(ir);
2134 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002135 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002137	} else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138		err = -EFAULT;
	}
2139
2140 kfree(buf);
2141
2142done:
2143 hci_dev_put(hdev);
2144 return err;
2145}
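/* Rough userspace sketch of driving this ioctl (a hedged example, not
 * part of this file; it assumes BlueZ's <bluetooth/bluetooth.h> and
 * <bluetooth/hci.h> for HCIINQUIRY, IREQ_CACHE_FLUSH and the struct
 * layouts). The LAP below is the General Inquiry Access Code 0x9e8b33,
 * length is in 1.28s units and dev_id picks hci0:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf;
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	memset(&buf, 0, sizeof(buf));
 *	buf.ir.dev_id  = 0;
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;
 *	buf.ir.lap[0]  = 0x33;
 *	buf.ir.lap[1]  = 0x8b;
 *	buf.ir.lap[2]  = 0x9e;
 *	buf.ir.length  = 8;
 *	buf.ir.num_rsp = 255;
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)
 *		perror("HCIINQUIRY");
 */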
2146
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002147static int hci_dev_do_open(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149 int ret = 0;
2150
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151 BT_DBG("%s %p", hdev->name, hdev);
2152
2153 hci_req_lock(hdev);
2154
Johan Hovold94324962012-03-15 14:48:41 +01002155 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2156 ret = -ENODEV;
2157 goto done;
2158 }
2159
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002160 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2161 /* Check for rfkill but allow the HCI setup stage to
2162 * proceed (which in itself doesn't cause any RF activity).
2163 */
2164 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2165 ret = -ERFKILL;
2166 goto done;
2167 }
2168
 2169	/* Check for a valid public address or a configured static
 2170	 * random address, but let the HCI setup proceed to
2171 * be able to determine if there is a public address
2172 * or not.
2173 *
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002174 * In case of user channel usage, it is not important
2175 * if a public address or static random address is
2176 * available.
2177 *
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002178 * This check is only valid for BR/EDR controllers
2179 * since AMP controllers do not have an address.
2180 */
Marcel Holtmannc6beca02014-02-17 09:21:19 -08002181 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2182 hdev->dev_type == HCI_BREDR &&
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002183 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2184 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2185 ret = -EADDRNOTAVAIL;
2186 goto done;
2187 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002188 }
2189
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 if (test_bit(HCI_UP, &hdev->flags)) {
2191 ret = -EALREADY;
2192 goto done;
2193 }
2194
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 if (hdev->open(hdev)) {
2196 ret = -EIO;
2197 goto done;
2198 }
2199
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002200 atomic_set(&hdev->cmd_cnt, 1);
2201 set_bit(HCI_INIT, &hdev->flags);
2202
2203 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2204 ret = hdev->setup(hdev);
2205
2206 if (!ret) {
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002207 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002208 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002209 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 }
2211
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09002212 clear_bit(HCI_INIT, &hdev->flags);
2213
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 if (!ret) {
2215 hci_dev_hold(hdev);
Johan Hedbergd6bfd592014-02-23 19:42:20 +02002216 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217 set_bit(HCI_UP, &hdev->flags);
2218 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03002219 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002220 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Marcel Holtmann1514b892013-10-06 08:25:01 -07002221 hdev->dev_type == HCI_BREDR) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002222 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002223 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002224 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002225 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002226 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002228 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002229 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002230 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232 skb_queue_purge(&hdev->cmd_q);
2233 skb_queue_purge(&hdev->rx_q);
2234
2235 if (hdev->flush)
2236 hdev->flush(hdev);
2237
2238 if (hdev->sent_cmd) {
2239 kfree_skb(hdev->sent_cmd);
2240 hdev->sent_cmd = NULL;
2241 }
2242
2243 hdev->close(hdev);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002244 hdev->flags &= BIT(HCI_RAW);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 }
2246
2247done:
2248 hci_req_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249 return ret;
2250}
2251
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002252/* ---- HCI ioctl helpers ---- */
2253
2254int hci_dev_open(__u16 dev)
2255{
2256 struct hci_dev *hdev;
2257 int err;
2258
2259 hdev = hci_dev_get(dev);
2260 if (!hdev)
2261 return -ENODEV;
2262
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002263 /* Devices that are marked for raw-only usage can only be powered
 2264	 * up as a user channel. Trying to bring them up as normal devices
 2265	 * will result in a failure. Only user channel operation is
2266 * possible.
2267 *
2268 * When this function is called for a user channel, the flag
2269 * HCI_USER_CHANNEL will be set first before attempting to
2270 * open the device.
2271 */
2272 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
2273 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2274 err = -EOPNOTSUPP;
2275 goto done;
2276 }
2277
Johan Hedberge1d08f42013-10-01 22:44:50 +03002278 /* We need to ensure that no other power on/off work is pending
2279 * before proceeding to call hci_dev_do_open. This is
2280 * particularly important if the setup procedure has not yet
2281 * completed.
2282 */
2283 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2284 cancel_delayed_work(&hdev->power_off);
2285
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002286 /* After this call it is guaranteed that the setup procedure
2287 * has finished. This means that error conditions like RFKILL
2288 * or no valid public or static random address apply.
2289 */
Johan Hedberge1d08f42013-10-01 22:44:50 +03002290 flush_workqueue(hdev->req_workqueue);
2291
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002292 err = hci_dev_do_open(hdev);
2293
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002294done:
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002295 hci_dev_put(hdev);
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002296 return err;
2297}
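/* Hedged userspace sketch (not part of this file): this function is
 * what an ioctl(HCIDEVUP) against a raw HCI socket ends up in, e.g.:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	if (ioctl(dd, HCIDEVUP, 0) < 0 && errno != EALREADY)
 *		perror("HCIDEVUP");
 *
 * where 0 is the device index (hci0) and EALREADY corresponds to the
 * -EALREADY returned by hci_dev_do_open() for an already-up device.
 */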
2298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299static int hci_dev_do_close(struct hci_dev *hdev)
2300{
2301 BT_DBG("%s %p", hdev->name, hdev);
2302
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03002303 cancel_delayed_work(&hdev->power_off);
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 hci_req_cancel(hdev, ENODEV);
2306 hci_req_lock(hdev);
2307
2308 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002309 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310 hci_req_unlock(hdev);
2311 return 0;
2312 }
2313
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002314 /* Flush RX and TX works */
2315 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002316 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002318 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02002319 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002320 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02002321 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Marcel Holtmann310a3d42013-10-15 09:13:39 -07002322 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002323 }
2324
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002325 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02002326 cancel_delayed_work(&hdev->service_cache);
2327
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002328 cancel_delayed_work_sync(&hdev->le_scan_disable);
Johan Hedberg4518bb02014-02-24 20:35:07 +02002329
2330 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2331 cancel_delayed_work_sync(&hdev->rpa_expired);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002332
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002333 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002334 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 hci_conn_hash_flush(hdev);
Andre Guedes6046dc32014-02-26 20:21:51 -03002336 hci_pend_le_conns_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002337 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
2339 hci_notify(hdev, HCI_DEV_DOWN);
2340
2341 if (hdev->flush)
2342 hdev->flush(hdev);
2343
2344 /* Reset device */
2345 skb_queue_purge(&hdev->cmd_q);
2346 atomic_set(&hdev->cmd_cnt, 1);
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002347 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks) &&
Marcel Holtmann3a6afbd2013-10-11 09:44:12 -07002348 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02002349 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02002351 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002352 clear_bit(HCI_INIT, &hdev->flags);
2353 }
2354
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002355 /* flush cmd work */
2356 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002357
2358 /* Drop queues */
2359 skb_queue_purge(&hdev->rx_q);
2360 skb_queue_purge(&hdev->cmd_q);
2361 skb_queue_purge(&hdev->raw_q);
2362
2363 /* Drop last sent command */
2364 if (hdev->sent_cmd) {
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02002365 cancel_delayed_work_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366 kfree_skb(hdev->sent_cmd);
2367 hdev->sent_cmd = NULL;
2368 }
2369
Johan Hedbergb6ddb632013-04-02 13:34:31 +03002370 kfree_skb(hdev->recv_evt);
2371 hdev->recv_evt = NULL;
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373 /* After this point our queues are empty
2374 * and no tasks are scheduled. */
2375 hdev->close(hdev);
2376
Johan Hedberg35b973c2013-03-15 17:06:59 -05002377 /* Clear flags */
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002378 hdev->flags &= BIT(HCI_RAW);
Johan Hedberg35b973c2013-03-15 17:06:59 -05002379 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2380
Marcel Holtmann93c311a2013-10-07 00:58:33 -07002381 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2382 if (hdev->dev_type == HCI_BREDR) {
2383 hci_dev_lock(hdev);
2384 mgmt_powered(hdev, 0);
2385 hci_dev_unlock(hdev);
2386 }
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002387 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02002388
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002389 /* Controller radio is available but is currently powered down */
Marcel Holtmann536619e2013-10-05 11:47:45 -07002390 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02002391
Johan Hedberge59fda82012-02-22 18:11:53 +02002392 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02002393 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Marcel Holtmann7a4cd512014-02-19 19:52:13 -08002394 bacpy(&hdev->random_addr, BDADDR_ANY);
Johan Hedberge59fda82012-02-22 18:11:53 +02002395
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396 hci_req_unlock(hdev);
2397
2398 hci_dev_put(hdev);
2399 return 0;
2400}
2401
2402int hci_dev_close(__u16 dev)
2403{
2404 struct hci_dev *hdev;
2405 int err;
2406
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002407 hdev = hci_dev_get(dev);
2408 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002410
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002411 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2412 err = -EBUSY;
2413 goto done;
2414 }
2415
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002416 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2417 cancel_delayed_work(&hdev->power_off);
2418
Linus Torvalds1da177e2005-04-16 15:20:36 -07002419 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002420
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002421done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002422 hci_dev_put(hdev);
2423 return err;
2424}
2425
2426int hci_dev_reset(__u16 dev)
2427{
2428 struct hci_dev *hdev;
2429 int ret = 0;
2430
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002431 hdev = hci_dev_get(dev);
2432 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433 return -ENODEV;
2434
2435 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436
Marcel Holtmann808a0492013-08-26 20:57:58 -07002437 if (!test_bit(HCI_UP, &hdev->flags)) {
2438 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002439 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07002440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002441
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002442 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2443 ret = -EBUSY;
2444 goto done;
2445 }
2446
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002447 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2448 ret = -EOPNOTSUPP;
2449 goto done;
2450 }
2451
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 /* Drop queues */
2453 skb_queue_purge(&hdev->rx_q);
2454 skb_queue_purge(&hdev->cmd_q);
2455
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002456 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03002457 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002459 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460
2461 if (hdev->flush)
2462 hdev->flush(hdev);
2463
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002464 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002465 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002467 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
2469done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002470 hci_req_unlock(hdev);
2471 hci_dev_put(hdev);
2472 return ret;
2473}
2474
2475int hci_dev_reset_stat(__u16 dev)
2476{
2477 struct hci_dev *hdev;
2478 int ret = 0;
2479
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002480 hdev = hci_dev_get(dev);
2481 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482 return -ENODEV;
2483
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002484 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2485 ret = -EBUSY;
2486 goto done;
2487 }
2488
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002489 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2490 ret = -EOPNOTSUPP;
2491 goto done;
2492 }
2493
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2495
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002496done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002498 return ret;
2499}
2500
2501int hci_dev_cmd(unsigned int cmd, void __user *arg)
2502{
2503 struct hci_dev *hdev;
2504 struct hci_dev_req dr;
2505 int err = 0;
2506
2507 if (copy_from_user(&dr, arg, sizeof(dr)))
2508 return -EFAULT;
2509
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002510 hdev = hci_dev_get(dr.dev_id);
2511 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002512 return -ENODEV;
2513
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002514 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2515 err = -EBUSY;
2516 goto done;
2517 }
2518
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002519 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
2520 err = -EOPNOTSUPP;
2521 goto done;
2522 }
2523
Marcel Holtmann5b69bef52013-10-10 10:02:08 -07002524 if (hdev->dev_type != HCI_BREDR) {
2525 err = -EOPNOTSUPP;
2526 goto done;
2527 }
2528
Johan Hedberg56f87902013-10-02 13:43:13 +03002529 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2530 err = -EOPNOTSUPP;
2531 goto done;
2532 }
2533
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534 switch (cmd) {
2535 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002536 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2537 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538 break;
2539
2540 case HCISETENCRYPT:
2541 if (!lmp_encrypt_capable(hdev)) {
2542 err = -EOPNOTSUPP;
2543 break;
2544 }
2545
2546 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2547 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02002548 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2549 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 if (err)
2551 break;
2552 }
2553
Johan Hedberg01178cd2013-03-05 20:37:41 +02002554 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2555 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 break;
2557
2558 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002559 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2560 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 break;
2562
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002563 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02002564 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2565 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002566 break;
2567
2568 case HCISETLINKMODE:
2569 hdev->link_mode = ((__u16) dr.dev_opt) &
2570 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2571 break;
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 case HCISETPTYPE:
2574 hdev->pkt_type = (__u16) dr.dev_opt;
2575 break;
2576
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002578 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2579 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 break;
2581
2582 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002583 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2584 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585 break;
2586
2587 default:
2588 err = -EINVAL;
2589 break;
2590 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02002591
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002592done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593 hci_dev_put(hdev);
2594 return err;
2595}
2596
2597int hci_get_dev_list(void __user *arg)
2598{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002599 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 struct hci_dev_list_req *dl;
2601 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 int n = 0, size, err;
2603 __u16 dev_num;
2604
2605 if (get_user(dev_num, (__u16 __user *) arg))
2606 return -EFAULT;
2607
2608 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2609 return -EINVAL;
2610
2611 size = sizeof(*dl) + dev_num * sizeof(*dr);
2612
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002613 dl = kzalloc(size, GFP_KERNEL);
2614 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 return -ENOMEM;
2616
2617 dr = dl->dev_req;
2618
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002619 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002620 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002621 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02002622 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02002623
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002624 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2625 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002626
Linus Torvalds1da177e2005-04-16 15:20:36 -07002627 (dr + n)->dev_id = hdev->id;
2628 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02002629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 if (++n >= dev_num)
2631 break;
2632 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002633 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634
2635 dl->dev_num = n;
2636 size = sizeof(*dl) + n * sizeof(*dr);
2637
2638 err = copy_to_user(arg, dl, size);
2639 kfree(dl);
2640
2641 return err ? -EFAULT : 0;
2642}
2643
2644int hci_get_dev_info(void __user *arg)
2645{
2646 struct hci_dev *hdev;
2647 struct hci_dev_info di;
2648 int err = 0;
2649
2650 if (copy_from_user(&di, arg, sizeof(di)))
2651 return -EFAULT;
2652
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002653 hdev = hci_dev_get(di.dev_id);
2654 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 return -ENODEV;
2656
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002657 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02002658 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002659
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002660 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2661 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02002662
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 strcpy(di.name, hdev->name);
2664 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07002665 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 di.flags = hdev->flags;
2667 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03002668 if (lmp_bredr_capable(hdev)) {
2669 di.acl_mtu = hdev->acl_mtu;
2670 di.acl_pkts = hdev->acl_pkts;
2671 di.sco_mtu = hdev->sco_mtu;
2672 di.sco_pkts = hdev->sco_pkts;
2673 } else {
2674 di.acl_mtu = hdev->le_mtu;
2675 di.acl_pkts = hdev->le_pkts;
2676 di.sco_mtu = 0;
2677 di.sco_pkts = 0;
2678 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 di.link_policy = hdev->link_policy;
2680 di.link_mode = hdev->link_mode;
2681
2682 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2683 memcpy(&di.features, &hdev->features, sizeof(di.features));
2684
2685 if (copy_to_user(arg, &di, sizeof(di)))
2686 err = -EFAULT;
2687
2688 hci_dev_put(hdev);
2689
2690 return err;
2691}
2692
2693/* ---- Interface to HCI drivers ---- */
2694
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002695static int hci_rfkill_set_block(void *data, bool blocked)
2696{
2697 struct hci_dev *hdev = data;
2698
2699 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2700
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07002701 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2702 return -EBUSY;
2703
Johan Hedberg5e130362013-09-13 08:58:17 +03002704 if (blocked) {
2705 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03002706 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2707 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03002708 } else {
2709 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03002710 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002711
2712 return 0;
2713}
2714
2715static const struct rfkill_ops hci_rfkill_ops = {
2716 .set_block = hci_rfkill_set_block,
2717};
2718
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002719static void hci_power_on(struct work_struct *work)
2720{
2721 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002722 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002723
2724 BT_DBG("%s", hdev->name);
2725
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03002726 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03002727 if (err < 0) {
2728 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002729 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03002730 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002731
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07002732 /* During the HCI setup phase, a few error conditions are
2733 * ignored and they need to be checked now. If they are still
2734 * valid, it is important to turn the device back off.
2735 */
2736 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2737 (hdev->dev_type == HCI_BREDR &&
2738 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2739 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03002740 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2741 hci_dev_do_close(hdev);
2742 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02002743 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2744 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03002745 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002746
Marcel Holtmannfee746b2014-06-29 12:13:05 +02002747 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2748 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2749 mgmt_index_added(hdev);
2750 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002751}
2752
2753static void hci_power_off(struct work_struct *work)
2754{
Johan Hedberg32435532011-11-07 22:16:04 +02002755 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002756 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002757
2758 BT_DBG("%s", hdev->name);
2759
Marcel Holtmann8ee56542012-02-21 12:33:48 +01002760 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002761}
2762
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002763static void hci_discov_off(struct work_struct *work)
2764{
2765 struct hci_dev *hdev;
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002766
2767 hdev = container_of(work, struct hci_dev, discov_off.work);
2768
2769 BT_DBG("%s", hdev->name);
2770
Marcel Holtmannd1967ff2013-10-15 10:57:40 -07002771 mgmt_discoverable_timeout(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02002772}
2773
Johan Hedberg35f74982014-02-18 17:14:32 +02002774void hci_uuids_clear(struct hci_dev *hdev)
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002775{
Johan Hedberg48210022013-01-27 00:31:28 +02002776 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002777
Johan Hedberg48210022013-01-27 00:31:28 +02002778 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2779 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002780 kfree(uuid);
2781 }
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002782}
2783
Johan Hedberg35f74982014-02-18 17:14:32 +02002784void hci_link_keys_clear(struct hci_dev *hdev)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002785{
2786 struct list_head *p, *n;
2787
2788 list_for_each_safe(p, n, &hdev->link_keys) {
2789 struct link_key *key;
2790
2791 key = list_entry(p, struct link_key, list);
2792
2793 list_del(p);
2794 kfree(key);
2795 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002796}
2797
Johan Hedberg35f74982014-02-18 17:14:32 +02002798void hci_smp_ltks_clear(struct hci_dev *hdev)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002799{
2800 struct smp_ltk *k, *tmp;
2801
2802 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2803 list_del(&k->list);
2804 kfree(k);
2805 }
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002806}
2807
Johan Hedberg970c4e42014-02-18 10:19:33 +02002808void hci_smp_irks_clear(struct hci_dev *hdev)
2809{
2810 struct smp_irk *k, *tmp;
2811
2812 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2813 list_del(&k->list);
2814 kfree(k);
2815 }
2816}
2817
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002818struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2819{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002820 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002821
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002822 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002823 if (bacmp(bdaddr, &k->bdaddr) == 0)
2824 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002825
2826 return NULL;
2827}
2828
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302829static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002830 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002831{
2832 /* Legacy key */
2833 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302834 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002835
2836 /* Debug keys are insecure so don't store them persistently */
2837 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302838 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002839
2840 /* Changed combination key and there's no previous one */
2841 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302842 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002843
2844 /* Security mode 3 case */
2845 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302846 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002847
2848 /* Neither local nor remote side had no-bonding as requirement */
2849 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302850 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002851
2852 /* Local side had dedicated bonding as requirement */
2853 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302854 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002855
2856 /* Remote side had dedicated bonding as requirement */
2857 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302858 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002859
2860 /* If none of the above criteria match, then don't store the key
2861 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302862 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002863}
2864
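/* Condensed view of the hci_persistent_key() decision above; an
 * illustrative summary only, the function itself is authoritative:
 *
 *	legacy key (type < 0x03)                 -> store
 *	HCI_LK_DEBUG_COMBINATION                 -> never store
 *	changed combination key, no previous key -> don't store
 *	no connection (security mode 3)          -> store
 *	neither side requested no-bonding        -> store
 *	either side requested dedicated bonding  -> store
 *	anything else                            -> don't store
 */
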
Johan Hedberg98a0b842014-01-30 19:40:00 -08002865static bool ltk_type_master(u8 type)
2866{
Johan Hedbergd97c9fb2014-06-18 14:09:40 +03002867 return (type == SMP_LTK);
Johan Hedberg98a0b842014-01-30 19:40:00 -08002868}
2869
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002870struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002871 bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002872{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002873 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002874
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002875 list_for_each_entry(k, &hdev->long_term_keys, list) {
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002876 if (k->ediv != ediv || k->rand != rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002877 continue;
2878
Johan Hedberg98a0b842014-01-30 19:40:00 -08002879 if (ltk_type_master(k->type) != master)
2880 continue;
2881
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002882 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002883 }
2884
2885 return NULL;
2886}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002887
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002888struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg98a0b842014-01-30 19:40:00 -08002889 u8 addr_type, bool master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002890{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002891 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002892
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002893 list_for_each_entry(k, &hdev->long_term_keys, list)
2894 if (addr_type == k->bdaddr_type &&
Johan Hedberg98a0b842014-01-30 19:40:00 -08002895 bacmp(bdaddr, &k->bdaddr) == 0 &&
2896 ltk_type_master(k->type) == master)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002897 return k;
2898
2899 return NULL;
2900}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002901
Johan Hedberg970c4e42014-02-18 10:19:33 +02002902struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2903{
2904 struct smp_irk *irk;
2905
2906 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2907 if (!bacmp(&irk->rpa, rpa))
2908 return irk;
2909 }
2910
2911 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2912 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2913 bacpy(&irk->rpa, rpa);
2914 return irk;
2915 }
2916 }
2917
2918 return NULL;
2919}
2920
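/* The first loop in hci_find_irk_by_rpa() matches the cached RPA with
 * a cheap comparison; the AES-based smp_irk_matches() pass only runs
 * when no cached entry matches. Minimal usage sketch for a
 * hypothetical caller resolving an address from an advertising report
 * ("rpa" is an assumed input):
 *
 *	struct smp_irk *irk;
 *
 *	irk = hci_find_irk_by_rpa(hdev, &rpa);
 *	if (irk)
 *		BT_DBG("RPA %pMR resolved to %pMR", &rpa, &irk->bdaddr);
 */
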
2921struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2922 u8 addr_type)
2923{
2924 struct smp_irk *irk;
2925
Johan Hedberg6cfc9982014-02-18 21:41:35 +02002926 /* Identity Address must be public or static random */
2927 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2928 return NULL;
2929
Johan Hedberg970c4e42014-02-18 10:19:33 +02002930 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2931 if (addr_type == irk->addr_type &&
2932 bacmp(bdaddr, &irk->bdaddr) == 0)
2933 return irk;
2934 }
2935
2936 return NULL;
2937}
2938
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002939struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedberg7652ff62014-06-24 13:15:49 +03002940 bdaddr_t *bdaddr, u8 *val, u8 type,
2941 u8 pin_len, bool *persistent)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002942{
2943 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05302944 u8 old_key_type;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002945
2946 old_key = hci_find_link_key(hdev, bdaddr);
2947 if (old_key) {
2948 old_key_type = old_key->type;
2949 key = old_key;
2950 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07002951 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002952 key = kzalloc(sizeof(*key), GFP_KERNEL);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002953 if (!key)
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002954 return NULL;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002955 list_add(&key->list, &hdev->link_keys);
2956 }
2957
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002958 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002959
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002960 /* Some buggy controller combinations generate a changed
2961 * combination key for legacy pairing even when there's no
2962 * previous key */
2963 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002964 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002965 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07002966 if (conn)
2967 conn->key_type = type;
2968 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07002969
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002970 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03002971 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002972 key->pin_len = pin_len;
2973
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02002974 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002975 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07002976 else
2977 key->type = type;
2978
Johan Hedberg7652ff62014-06-24 13:15:49 +03002979 if (persistent)
2980 *persistent = hci_persistent_key(hdev, conn, type,
2981 old_key_type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002982
Johan Hedberg567fa2a2014-06-24 13:15:48 +03002983 return key;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002984}
2985
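/* Usage sketch for hci_add_link_key(), as a Link Key Notification
 * event handler might call it; the "ev" structure and "pin_len" are
 * assumptions for illustration:
 *
 *	bool persistent;
 *	struct link_key *key;
 *
 *	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
 *			       ev->key_type, pin_len, &persistent);
 *	if (!key)
 *		return;
 */
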
Johan Hedbergca9142b2014-02-19 14:57:44 +02002986struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
Johan Hedberg35d70272014-02-19 14:57:47 +02002987 u8 addr_type, u8 type, u8 authenticated,
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08002988 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002989{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002990 struct smp_ltk *key, *old_key;
Johan Hedberg98a0b842014-01-30 19:40:00 -08002991 bool master = ltk_type_master(type);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002992
Johan Hedberg98a0b842014-01-30 19:40:00 -08002993 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002994 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002995 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03002996 else {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02002997 key = kzalloc(sizeof(*key), GFP_KERNEL);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03002998 if (!key)
Johan Hedbergca9142b2014-02-19 14:57:44 +02002999 return NULL;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003000 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003001 }
3002
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003003 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003004 key->bdaddr_type = addr_type;
3005 memcpy(key->val, tk, sizeof(key->val));
3006 key->authenticated = authenticated;
3007 key->ediv = ediv;
Marcel Holtmannfe39c7b2014-02-27 16:00:28 -08003008 key->rand = rand;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03003009 key->enc_size = enc_size;
3010 key->type = type;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003011
Johan Hedbergca9142b2014-02-19 14:57:44 +02003012 return key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03003013}
3014
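/* Usage sketch for hci_add_ltk(); a hypothetical SMP key-distribution
 * caller storing a peer master LTK (all local variable names are
 * assumptions):
 *
 *	struct smp_ltk *ltk;
 *
 *	ltk = hci_add_ltk(hdev, &conn->dst, conn->dst_type, SMP_LTK,
 *			  authenticated, tk, enc_size, ediv, rand);
 *	if (!ltk)
 *		return -ENOMEM;
 */
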
Johan Hedbergca9142b2014-02-19 14:57:44 +02003015struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3016 u8 addr_type, u8 val[16], bdaddr_t *rpa)
Johan Hedberg970c4e42014-02-18 10:19:33 +02003017{
3018 struct smp_irk *irk;
3019
3020 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3021 if (!irk) {
3022 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3023 if (!irk)
Johan Hedbergca9142b2014-02-19 14:57:44 +02003024 return NULL;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003025
3026 bacpy(&irk->bdaddr, bdaddr);
3027 irk->addr_type = addr_type;
3028
3029 list_add(&irk->list, &hdev->identity_resolving_keys);
3030 }
3031
3032 memcpy(irk->val, val, 16);
3033 bacpy(&irk->rpa, rpa);
3034
Johan Hedbergca9142b2014-02-19 14:57:44 +02003035 return irk;
Johan Hedberg970c4e42014-02-18 10:19:33 +02003036}
3037
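/* Usage sketch for hci_add_irk(); a hypothetical caller storing a peer
 * IRK together with the RPA the peer is currently known by ("irk_val"
 * and "rpa" are assumed inputs):
 *
 *	struct smp_irk *irk;
 *
 *	irk = hci_add_irk(hdev, &bdaddr, addr_type, irk_val, &rpa);
 *	if (!irk)
 *		return;
 */
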
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003038int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3039{
3040 struct link_key *key;
3041
3042 key = hci_find_link_key(hdev, bdaddr);
3043 if (!key)
3044 return -ENOENT;
3045
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003046 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003047
3048 list_del(&key->list);
3049 kfree(key);
3050
3051 return 0;
3052}
3053
Johan Hedberge0b2b272014-02-18 17:14:31 +02003054int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003055{
3056 struct smp_ltk *k, *tmp;
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003057 int removed = 0;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003058
3059 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
Johan Hedberge0b2b272014-02-18 17:14:31 +02003060 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003061 continue;
3062
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003063 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003064
3065 list_del(&k->list);
3066 kfree(k);
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003067 removed++;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003068 }
3069
Johan Hedbergc51ffa02014-02-18 17:14:33 +02003070 return removed ? 0 : -ENOENT;
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003071}
3072
Johan Hedberga7ec7332014-02-18 17:14:35 +02003073void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3074{
3075 struct smp_irk *k, *tmp;
3076
Johan Hedberg668b7b12014-02-21 16:03:31 +02003077 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
Johan Hedberga7ec7332014-02-18 17:14:35 +02003078 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3079 continue;
3080
3081 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3082
3083 list_del(&k->list);
3084 kfree(k);
3085 }
3086}
3087
Ville Tervo6bd32322011-02-16 16:32:41 +02003088/* HCI command timer function */
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003089static void hci_cmd_timeout(struct work_struct *work)
Ville Tervo6bd32322011-02-16 16:32:41 +02003090{
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003091 struct hci_dev *hdev = container_of(work, struct hci_dev,
3092 cmd_timer.work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003093
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03003094 if (hdev->sent_cmd) {
3095 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3096 u16 opcode = __le16_to_cpu(sent->opcode);
3097
3098 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3099 } else {
3100 BT_ERR("%s command tx timeout", hdev->name);
3101 }
3102
Ville Tervo6bd32322011-02-16 16:32:41 +02003103 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003104 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02003105}
3106
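/* Sketch of how this timeout is armed; the actual arming happens in
 * hci_cmd_work() when a command is handed to the driver, using the
 * HCI_CMD_TIMEOUT constant from hci.h:
 *
 *	schedule_delayed_work(&hdev->cmd_timer, HCI_CMD_TIMEOUT);
 */
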
Szymon Janc2763eda2011-03-22 13:12:22 +01003107struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003108 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01003109{
3110 struct oob_data *data;
3111
3112 list_for_each_entry(data, &hdev->remote_oob_data, list)
3113 if (bacmp(bdaddr, &data->bdaddr) == 0)
3114 return data;
3115
3116 return NULL;
3117}
3118
3119int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3120{
3121 struct oob_data *data;
3122
3123 data = hci_find_remote_oob_data(hdev, bdaddr);
3124 if (!data)
3125 return -ENOENT;
3126
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003127 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003128
3129 list_del(&data->list);
3130 kfree(data);
3131
3132 return 0;
3133}
3134
Johan Hedberg35f74982014-02-18 17:14:32 +02003135void hci_remote_oob_data_clear(struct hci_dev *hdev)
Szymon Janc2763eda2011-03-22 13:12:22 +01003136{
3137 struct oob_data *data, *n;
3138
3139 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3140 list_del(&data->list);
3141 kfree(data);
3142 }
Szymon Janc2763eda2011-03-22 13:12:22 +01003143}
3144
Marcel Holtmann07988722014-01-10 02:07:29 -08003145int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3146 u8 *hash, u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01003147{
3148 struct oob_data *data;
3149
3150 data = hci_find_remote_oob_data(hdev, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003151 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003152 data = kmalloc(sizeof(*data), GFP_KERNEL);
Szymon Janc2763eda2011-03-22 13:12:22 +01003153 if (!data)
3154 return -ENOMEM;
3155
3156 bacpy(&data->bdaddr, bdaddr);
3157 list_add(&data->list, &hdev->remote_oob_data);
3158 }
3159
Marcel Holtmann519ca9d2014-01-10 02:07:28 -08003160 memcpy(data->hash192, hash, sizeof(data->hash192));
3161 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
Szymon Janc2763eda2011-03-22 13:12:22 +01003162
Marcel Holtmann07988722014-01-10 02:07:29 -08003163 memset(data->hash256, 0, sizeof(data->hash256));
3164 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3165
3166 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3167
3168 return 0;
3169}
3170
3171int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3172 u8 *hash192, u8 *randomizer192,
3173 u8 *hash256, u8 *randomizer256)
3174{
3175 struct oob_data *data;
3176
3177 data = hci_find_remote_oob_data(hdev, bdaddr);
3178 if (!data) {
Johan Hedberg0a14ab42014-02-19 14:57:43 +02003179 data = kmalloc(sizeof(*data), GFP_KERNEL);
Marcel Holtmann07988722014-01-10 02:07:29 -08003180 if (!data)
3181 return -ENOMEM;
3182
3183 bacpy(&data->bdaddr, bdaddr);
3184 list_add(&data->list, &hdev->remote_oob_data);
3185 }
3186
3187 memcpy(data->hash192, hash192, sizeof(data->hash192));
3188 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3189
3190 memcpy(data->hash256, hash256, sizeof(data->hash256));
3191 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3192
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003193 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01003194
3195 return 0;
3196}
3197
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003198struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3199 bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003200{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003201 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003202
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003203 list_for_each_entry(b, &hdev->blacklist, list) {
3204 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003205 return b;
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003206 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003207
3208 return NULL;
3209}
3210
Marcel Holtmannc9507492014-02-27 19:35:54 -08003211static void hci_blacklist_clear(struct hci_dev *hdev)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003212{
3213 struct list_head *p, *n;
3214
3215 list_for_each_safe(p, n, &hdev->blacklist) {
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003216 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003217
3218 list_del(p);
3219 kfree(b);
3220 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003221}
3222
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003223int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003224{
3225 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003226
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003227 if (!bacmp(bdaddr, BDADDR_ANY))
Antti Julkub2a66aa2011-06-15 12:01:14 +03003228 return -EBADF;
3229
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003230 if (hci_blacklist_lookup(hdev, bdaddr, type))
Antti Julku5e762442011-08-25 16:48:02 +03003231 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003232
3233 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03003234 if (!entry)
3235 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003236
3237 bacpy(&entry->bdaddr, bdaddr);
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003238 entry->bdaddr_type = type;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003239
3240 list_add(&entry->list, &hdev->blacklist);
3241
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003242 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003243}
3244
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003245int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03003246{
3247 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003248
Johan Hedberg35f74982014-02-18 17:14:32 +02003249 if (!bacmp(bdaddr, BDADDR_ANY)) {
3250 hci_blacklist_clear(hdev);
3251 return 0;
3252 }
Antti Julkub2a66aa2011-06-15 12:01:14 +03003253
Marcel Holtmannb9ee0a72013-10-17 17:24:13 -07003254 entry = hci_blacklist_lookup(hdev, bdaddr, type);
Szymon Janc1ec918c2011-11-16 09:32:21 +01003255 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03003256 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03003257
3258 list_del(&entry->list);
3259 kfree(entry);
3260
Johan Hedberg88c1fe42012-02-09 15:56:11 +02003261 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03003262}
3263
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003264struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3265 bdaddr_t *bdaddr, u8 type)
3266{
3267 struct bdaddr_list *b;
3268
3269 list_for_each_entry(b, &hdev->le_white_list, list) {
3270 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3271 return b;
3272 }
3273
3274 return NULL;
3275}
3276
3277void hci_white_list_clear(struct hci_dev *hdev)
3278{
3279 struct list_head *p, *n;
3280
3281 list_for_each_safe(p, n, &hdev->le_white_list) {
3282 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3283
3284 list_del(p);
3285 kfree(b);
3286 }
3287}
3288
3289int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3290{
3291 struct bdaddr_list *entry;
3292
3293 if (!bacmp(bdaddr, BDADDR_ANY))
3294 return -EBADF;
3295
3296 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3297 if (!entry)
3298 return -ENOMEM;
3299
3300 bacpy(&entry->bdaddr, bdaddr);
3301 entry->bdaddr_type = type;
3302
3303 list_add(&entry->list, &hdev->le_white_list);
3304
3305 return 0;
3306}
3307
3308int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3309{
3310 struct bdaddr_list *entry;
3311
3312 if (!bacmp(bdaddr, BDADDR_ANY))
3313 return -EBADF;
3314
3315 entry = hci_white_list_lookup(hdev, bdaddr, type);
3316 if (!entry)
3317 return -ENOENT;
3318
3319 list_del(&entry->list);
3320 kfree(entry);
3321
3322 return 0;
3323}
3324
Andre Guedes15819a72014-02-03 13:56:18 -03003325/* This function requires the caller holds hdev->lock */
3326struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3327 bdaddr_t *addr, u8 addr_type)
3328{
3329 struct hci_conn_params *params;
3330
3331 list_for_each_entry(params, &hdev->le_conn_params, list) {
3332 if (bacmp(&params->addr, addr) == 0 &&
3333 params->addr_type == addr_type) {
3334 return params;
3335 }
3336 }
3337
3338 return NULL;
3339}
3340
Andre Guedescef952c2014-02-26 20:21:49 -03003341static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3342{
3343 struct hci_conn *conn;
3344
3345 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3346 if (!conn)
3347 return false;
3348
3349 if (conn->dst_type != type)
3350 return false;
3351
3352 if (conn->state != BT_CONNECTED)
3353 return false;
3354
3355 return true;
3356}
3357
Andre Guedesa9b0a042014-02-26 20:21:52 -03003358static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3359{
3360 if (addr_type == ADDR_LE_DEV_PUBLIC)
3361 return true;
3362
3363 /* Check for Random Static address type */
3364 if ((addr->b[5] & 0xc0) == 0xc0)
3365 return true;
3366
3367 return false;
3368}
3369
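/* Illustrative examples for is_identity_address(); bdaddr_t is stored
 * little-endian, so b[5] is the most significant byte of the address
 * as usually printed:
 *
 *	any address, ADDR_LE_DEV_PUBLIC   -> identity address
 *	C4:xx:xx:xx:xx:xx, random         -> identity (top two bits are
 *					     11, i.e. static random)
 *	7B:xx:xx:xx:xx:xx, random         -> not an identity address
 *					     (resolvable private range)
 */
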
Andre Guedes15819a72014-02-03 13:56:18 -03003370/* This function requires the caller holds hdev->lock */
Marcel Holtmann4b109662014-06-29 13:41:49 +02003371struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3372 bdaddr_t *addr, u8 addr_type)
3373{
3374 struct bdaddr_list *entry;
3375
3376 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3377 if (bacmp(&entry->bdaddr, addr) == 0 &&
3378 entry->bdaddr_type == addr_type)
3379 return entry;
3380 }
3381
3382 return NULL;
3383}
3384
3385/* This function requires the caller holds hdev->lock */
3386void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3387{
3388 struct bdaddr_list *entry;
3389
3390 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3391 if (entry)
3392 goto done;
3393
3394 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3395 if (!entry) {
3396 BT_ERR("Out of memory");
3397 return;
3398 }
3399
3400 bacpy(&entry->bdaddr, addr);
3401 entry->bdaddr_type = addr_type;
3402
3403 list_add(&entry->list, &hdev->pend_le_conns);
3404
3405 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3406
3407done:
3408 hci_update_background_scan(hdev);
3409}
3410
3411/* This function requires the caller holds hdev->lock */
3412void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3413{
3414 struct bdaddr_list *entry;
3415
3416 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3417 if (!entry)
3418 goto done;
3419
3420 list_del(&entry->list);
3421 kfree(entry);
3422
3423 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3424
3425done:
3426 hci_update_background_scan(hdev);
3427}
3428
3429/* This function requires the caller holds hdev->lock */
3430void hci_pend_le_conns_clear(struct hci_dev *hdev)
3431{
3432 struct bdaddr_list *entry, *tmp;
3433
3434 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3435 list_del(&entry->list);
3436 kfree(entry);
3437 }
3438
3439 BT_DBG("All LE pending connections cleared");
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02003440
3441 hci_update_background_scan(hdev);
Marcel Holtmann4b109662014-06-29 13:41:49 +02003442}
3443
3444/* This function requires the caller holds hdev->lock */
Andre Guedesa9b0a042014-02-26 20:21:52 -03003445int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3446 u8 auto_connect, u16 conn_min_interval,
3447 u16 conn_max_interval)
Andre Guedes15819a72014-02-03 13:56:18 -03003448{
3449 struct hci_conn_params *params;
3450
Andre Guedesa9b0a042014-02-26 20:21:52 -03003451 if (!is_identity_address(addr, addr_type))
3452 return -EINVAL;
3453
Andre Guedes15819a72014-02-03 13:56:18 -03003454 params = hci_conn_params_lookup(hdev, addr, addr_type);
Andre Guedescef952c2014-02-26 20:21:49 -03003455 if (params)
3456 goto update;
Andre Guedes15819a72014-02-03 13:56:18 -03003457
3458 params = kzalloc(sizeof(*params), GFP_KERNEL);
3459 if (!params) {
3460 BT_ERR("Out of memory");
Andre Guedesa9b0a042014-02-26 20:21:52 -03003461 return -ENOMEM;
Andre Guedes15819a72014-02-03 13:56:18 -03003462 }
3463
3464 bacpy(&params->addr, addr);
3465 params->addr_type = addr_type;
Andre Guedescef952c2014-02-26 20:21:49 -03003466
3467 list_add(&params->list, &hdev->le_conn_params);
3468
3469update:
Andre Guedes15819a72014-02-03 13:56:18 -03003470 params->conn_min_interval = conn_min_interval;
3471 params->conn_max_interval = conn_max_interval;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003472 params->conn_latency = hdev->le_conn_latency;
3473 params->supervision_timeout = hdev->le_supv_timeout;
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003474 params->auto_connect = auto_connect;
Andre Guedes15819a72014-02-03 13:56:18 -03003475
Andre Guedescef952c2014-02-26 20:21:49 -03003476 switch (auto_connect) {
3477 case HCI_AUTO_CONN_DISABLED:
3478 case HCI_AUTO_CONN_LINK_LOSS:
3479 hci_pend_le_conn_del(hdev, addr, addr_type);
3480 break;
3481 case HCI_AUTO_CONN_ALWAYS:
3482 if (!is_connected(hdev, addr, addr_type))
3483 hci_pend_le_conn_add(hdev, addr, addr_type);
3484 break;
3485 }
Andre Guedes15819a72014-02-03 13:56:18 -03003486
Andre Guedes9fcb18e2014-02-26 20:21:48 -03003487 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3488 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3489 conn_min_interval, conn_max_interval);
Andre Guedesa9b0a042014-02-26 20:21:52 -03003490
3491 return 0;
Andre Guedes15819a72014-02-03 13:56:18 -03003492}
3493
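/* Usage sketch for hci_conn_params_add(); connection intervals are in
 * 1.25 ms units, so the hypothetical 0x0028/0x0038 below correspond to
 * 50 ms / 70 ms:
 *
 *	err = hci_conn_params_add(hdev, &addr, ADDR_LE_DEV_PUBLIC,
 *				  HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *	if (err)
 *		BT_ERR("Failed to add conn params: %d", err);
 */
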
3494/* This function requires the caller holds hdev->lock */
3495void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3496{
3497 struct hci_conn_params *params;
3498
3499 params = hci_conn_params_lookup(hdev, addr, addr_type);
3500 if (!params)
3501 return;
3502
Andre Guedescef952c2014-02-26 20:21:49 -03003503 hci_pend_le_conn_del(hdev, addr, addr_type);
3504
Andre Guedes15819a72014-02-03 13:56:18 -03003505 list_del(&params->list);
3506 kfree(params);
3507
3508 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3509}
3510
3511/* This function requires the caller holds hdev->lock */
3512void hci_conn_params_clear(struct hci_dev *hdev)
3513{
3514 struct hci_conn_params *params, *tmp;
3515
3516 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3517 list_del(&params->list);
3518 kfree(params);
3519 }
3520
Marcel Holtmann1089b672014-06-29 13:41:50 +02003521 hci_pend_le_conns_clear(hdev);
3522
Andre Guedes15819a72014-02-03 13:56:18 -03003523 BT_DBG("All LE connection parameters were removed");
3524}
3525
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003526static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003527{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003528 if (status) {
3529 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003530
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003531 hci_dev_lock(hdev);
3532 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3533 hci_dev_unlock(hdev);
3534 return;
3535 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003536}
3537
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003538static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003539{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003540 /* General inquiry access code (GIAC) */
3541 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3542 struct hci_request req;
3543 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003544 int err;
3545
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003546 if (status) {
3547 BT_ERR("Failed to disable LE scanning: status %d", status);
3548 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03003549 }
3550
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003551 switch (hdev->discovery.type) {
3552 case DISCOV_TYPE_LE:
3553 hci_dev_lock(hdev);
3554 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3555 hci_dev_unlock(hdev);
3556 break;
3557
3558 case DISCOV_TYPE_INTERLEAVED:
3559 hci_req_init(&req, hdev);
3560
3561 memset(&cp, 0, sizeof(cp));
3562 memcpy(&cp.lap, lap, sizeof(cp.lap));
3563 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3564 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3565
3566 hci_dev_lock(hdev);
3567
3568 hci_inquiry_cache_flush(hdev);
3569
3570 err = hci_req_run(&req, inquiry_complete);
3571 if (err) {
3572 BT_ERR("Inquiry request failed: err %d", err);
3573 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3574 }
3575
3576 hci_dev_unlock(hdev);
3577 break;
3578 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03003579}
3580
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003581static void le_scan_disable_work(struct work_struct *work)
3582{
3583 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003584 le_scan_disable.work);
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003585 struct hci_request req;
3586 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003587
3588 BT_DBG("%s", hdev->name);
3589
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003590 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003591
Andre Guedesb1efcc22014-02-26 20:21:40 -03003592 hci_req_add_le_scan_disable(&req);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03003593
Andre Guedes4c87eaa2013-04-30 15:29:32 -03003594 err = hci_req_run(&req, le_scan_disable_work_complete);
3595 if (err)
3596 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03003597}
3598
Johan Hedberg8d972502014-02-28 12:54:14 +02003599static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3600{
3601 struct hci_dev *hdev = req->hdev;
3602
3603 /* If we're advertising or initiating an LE connection we can't
3604 * go ahead and change the random address at this time. This is
3605 * because the eventual initiator address used for the
3606 * subsequently created connection will be undefined (some
3607 * controllers use the new address and others the one we had
3608 * when the operation started).
3609 *
3610 * In this kind of scenario skip the update and let the random
3611 * address be updated at the next cycle.
3612 */
3613 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3614 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3615 BT_DBG("Deferring random address update");
3616 return;
3617 }
3618
3619 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3620}
3621
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003622int hci_update_random_address(struct hci_request *req, bool require_privacy,
3623 u8 *own_addr_type)
Johan Hedbergebd3a742014-02-23 19:42:21 +02003624{
3625 struct hci_dev *hdev = req->hdev;
3626 int err;
3627
3628	/* If privacy is enabled, use a resolvable private address. If the
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003629	 * current RPA has expired, or something other than the current
3630	 * RPA is in use, then generate a new one.
Johan Hedbergebd3a742014-02-23 19:42:21 +02003631 */
3632 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
Johan Hedbergebd3a742014-02-23 19:42:21 +02003633 int to;
3634
3635 *own_addr_type = ADDR_LE_DEV_RANDOM;
3636
3637 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003638 !bacmp(&hdev->random_addr, &hdev->rpa))
Johan Hedbergebd3a742014-02-23 19:42:21 +02003639 return 0;
3640
Marcel Holtmann2b5224d2014-02-23 20:39:22 -08003641 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003642 if (err < 0) {
3643 BT_ERR("%s failed to generate new RPA", hdev->name);
3644 return err;
3645 }
3646
Johan Hedberg8d972502014-02-28 12:54:14 +02003647 set_random_addr(req, &hdev->rpa);
Johan Hedbergebd3a742014-02-23 19:42:21 +02003648
3649 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3650 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3651
3652 return 0;
3653 }
3654
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003655	/* In case privacy is required without a resolvable private address,
3656 * use an unresolvable private address. This is useful for active
3657 * scanning and non-connectable advertising.
3658 */
3659 if (require_privacy) {
3660 bdaddr_t urpa;
3661
3662 get_random_bytes(&urpa, 6);
3663 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3664
3665 *own_addr_type = ADDR_LE_DEV_RANDOM;
Johan Hedberg8d972502014-02-28 12:54:14 +02003666 set_random_addr(req, &urpa);
Marcel Holtmann94b1fc92014-02-23 20:25:54 -08003667 return 0;
3668 }
3669
Johan Hedbergebd3a742014-02-23 19:42:21 +02003670	/* If the static address is being forced, or there is no public
3671	 * address, use the static address as the random address (but skip
3672	 * the HCI command if the current random address is already the
3673	 * static one).
3674 */
Marcel Holtmann111902f2014-06-21 04:53:17 +02003675 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedbergebd3a742014-02-23 19:42:21 +02003676 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3677 *own_addr_type = ADDR_LE_DEV_RANDOM;
3678 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3679 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3680 &hdev->static_addr);
3681 return 0;
3682 }
3683
3684 /* Neither privacy nor static address is being used so use a
3685 * public address.
3686 */
3687 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3688
3689 return 0;
3690}
3691
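/* Usage sketch for hci_update_random_address(), following the pattern
 * a request builder (e.g. for scan or advertising parameters) would
 * use; "cp" stands in for the relevant command parameter struct:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 */
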
Johan Hedberga1f4c312014-02-27 14:05:41 +02003692/* Copy the Identity Address of the controller.
3693 *
3694 * If the controller has a public BD_ADDR, then by default use that one.
3695 * If this is an LE-only controller without a public address, default to
3696 * the static random address.
3697 *
3698 * For debugging purposes it is possible to force controllers with a
3699 * public address to use the static random address instead.
3700 */
3701void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3702 u8 *bdaddr_type)
3703{
Marcel Holtmann111902f2014-06-21 04:53:17 +02003704 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
Johan Hedberga1f4c312014-02-27 14:05:41 +02003705 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3706 bacpy(bdaddr, &hdev->static_addr);
3707 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3708 } else {
3709 bacpy(bdaddr, &hdev->bdaddr);
3710 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3711 }
3712}
3713
David Herrmann9be0dab2012-04-22 14:39:57 +02003714/* Alloc HCI device */
3715struct hci_dev *hci_alloc_dev(void)
3716{
3717 struct hci_dev *hdev;
3718
3719 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3720 if (!hdev)
3721 return NULL;
3722
David Herrmannb1b813d2012-04-22 14:39:58 +02003723 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3724 hdev->esco_type = (ESCO_HV1);
3725 hdev->link_mode = (HCI_LM_ACCEPT);
Marcel Holtmannb4cb9fb2013-10-14 13:56:16 -07003726 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3727 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01003728 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3729 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02003730
David Herrmannb1b813d2012-04-22 14:39:58 +02003731 hdev->sniff_max_interval = 800;
3732 hdev->sniff_min_interval = 80;
3733
Marcel Holtmann3f959d42014-02-20 11:55:56 -08003734 hdev->le_adv_channel_map = 0x07;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003735 hdev->le_scan_interval = 0x0060;
3736 hdev->le_scan_window = 0x0030;
Marcel Holtmann4e70c7e2013-10-19 07:09:13 -07003737 hdev->le_conn_min_interval = 0x0028;
3738 hdev->le_conn_max_interval = 0x0038;
Marcel Holtmann04fb7d92014-06-30 12:34:36 +02003739 hdev->le_conn_latency = 0x0000;
3740 hdev->le_supv_timeout = 0x002a;
Marcel Holtmannbef64732013-10-11 08:23:19 -07003741
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003742 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
Lukasz Rymanowskib9a7a612014-03-27 20:55:20 +01003743 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
Andrzej Kaczmarek31ad1692014-05-14 13:43:02 +02003744 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3745 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
Johan Hedbergd6bfd592014-02-23 19:42:20 +02003746
David Herrmannb1b813d2012-04-22 14:39:58 +02003747 mutex_init(&hdev->lock);
3748 mutex_init(&hdev->req_lock);
3749
3750 INIT_LIST_HEAD(&hdev->mgmt_pending);
3751 INIT_LIST_HEAD(&hdev->blacklist);
3752 INIT_LIST_HEAD(&hdev->uuids);
3753 INIT_LIST_HEAD(&hdev->link_keys);
3754 INIT_LIST_HEAD(&hdev->long_term_keys);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003755 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
David Herrmannb1b813d2012-04-22 14:39:58 +02003756 INIT_LIST_HEAD(&hdev->remote_oob_data);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003757 INIT_LIST_HEAD(&hdev->le_white_list);
Andre Guedes15819a72014-02-03 13:56:18 -03003758 INIT_LIST_HEAD(&hdev->le_conn_params);
Andre Guedes77a77a32014-02-26 20:21:46 -03003759 INIT_LIST_HEAD(&hdev->pend_le_conns);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03003760 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02003761
3762 INIT_WORK(&hdev->rx_work, hci_rx_work);
3763 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3764 INIT_WORK(&hdev->tx_work, hci_tx_work);
3765 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02003766
David Herrmannb1b813d2012-04-22 14:39:58 +02003767 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3768 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3769 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3770
David Herrmannb1b813d2012-04-22 14:39:58 +02003771 skb_queue_head_init(&hdev->rx_q);
3772 skb_queue_head_init(&hdev->cmd_q);
3773 skb_queue_head_init(&hdev->raw_q);
3774
3775 init_waitqueue_head(&hdev->req_wait_q);
3776
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02003777 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
David Herrmannb1b813d2012-04-22 14:39:58 +02003778
David Herrmannb1b813d2012-04-22 14:39:58 +02003779 hci_init_sysfs(hdev);
3780 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02003781
3782 return hdev;
3783}
3784EXPORT_SYMBOL(hci_alloc_dev);
3785
3786/* Free HCI device */
3787void hci_free_dev(struct hci_dev *hdev)
3788{
David Herrmann9be0dab2012-04-22 14:39:57 +02003789 /* will free via device release */
3790 put_device(&hdev->dev);
3791}
3792EXPORT_SYMBOL(hci_free_dev);
3793
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794/* Register HCI device */
3795int hci_register_dev(struct hci_dev *hdev)
3796{
David Herrmannb1b813d2012-04-22 14:39:58 +02003797 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798
David Herrmann010666a2012-01-07 15:47:07 +01003799 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003800 return -EINVAL;
3801
Mat Martineau08add512011-11-02 16:18:36 -07003802 /* Do not allow HCI_AMP devices to register at index 0,
3803 * so the index can be used as the AMP controller ID.
3804 */
Sasha Levin3df92b32012-05-27 22:36:56 +02003805 switch (hdev->dev_type) {
3806 case HCI_BREDR:
3807 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3808 break;
3809 case HCI_AMP:
3810 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3811 break;
3812 default:
3813 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003814 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003815
Sasha Levin3df92b32012-05-27 22:36:56 +02003816 if (id < 0)
3817 return id;
3818
Linus Torvalds1da177e2005-04-16 15:20:36 -07003819 sprintf(hdev->name, "hci%d", id);
3820 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03003821
3822 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3823
Kees Cookd8537542013-07-03 15:04:57 -07003824 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3825 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02003826 if (!hdev->workqueue) {
3827 error = -ENOMEM;
3828 goto err;
3829 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003830
Kees Cookd8537542013-07-03 15:04:57 -07003831 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3832 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003833 if (!hdev->req_workqueue) {
3834 destroy_workqueue(hdev->workqueue);
3835 error = -ENOMEM;
3836 goto err;
3837 }
3838
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003839 if (!IS_ERR_OR_NULL(bt_debugfs))
3840 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3841
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003842 dev_set_name(&hdev->dev, "%s", hdev->name);
3843
Johan Hedberg99780a72014-02-18 10:40:07 +02003844 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3845 CRYPTO_ALG_ASYNC);
3846 if (IS_ERR(hdev->tfm_aes)) {
3847 BT_ERR("Unable to create crypto context");
3848 error = PTR_ERR(hdev->tfm_aes);
3849 hdev->tfm_aes = NULL;
3850 goto err_wqueue;
3851 }
3852
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003853 error = device_add(&hdev->dev);
David Herrmann33ca9542011-10-08 14:58:49 +02003854 if (error < 0)
Johan Hedberg99780a72014-02-18 10:40:07 +02003855 goto err_tfm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003856
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003857 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003858 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3859 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003860 if (hdev->rfkill) {
3861 if (rfkill_register(hdev->rfkill) < 0) {
3862 rfkill_destroy(hdev->rfkill);
3863 hdev->rfkill = NULL;
3864 }
3865 }
3866
Johan Hedberg5e130362013-09-13 08:58:17 +03003867 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3868 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3869
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02003870 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07003871 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003872
Marcel Holtmann01cd3402013-10-06 01:16:22 -07003873 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03003874 /* Assume BR/EDR support until proven otherwise (such as
3875		 * through reading supported features during init).
3876 */
3877 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3878 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03003879
Gustavo Padovanfcee3372013-07-11 11:34:28 +01003880 write_lock(&hci_dev_list_lock);
3881 list_add(&hdev->list, &hci_dev_list);
3882 write_unlock(&hci_dev_list_lock);
3883
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003884 /* Devices that are marked for raw-only usage need to set
3885	 * the HCI_RAW flag to indicate that only the user channel is
3886 * supported.
3887 */
3888 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3889 set_bit(HCI_RAW, &hdev->flags);
3890
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01003892 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893
Johan Hedberg19202572013-01-14 22:33:51 +02003894 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07003895
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003897
Johan Hedberg99780a72014-02-18 10:40:07 +02003898err_tfm:
3899 crypto_free_blkcipher(hdev->tfm_aes);
David Herrmann33ca9542011-10-08 14:58:49 +02003900err_wqueue:
3901 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003902 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02003903err:
Sasha Levin3df92b32012-05-27 22:36:56 +02003904 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003905
David Herrmann33ca9542011-10-08 14:58:49 +02003906 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907}
3908EXPORT_SYMBOL(hci_register_dev);
3909
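/* Driver-side sketch of bringing up a new controller; my_open,
 * my_close and my_send are hypothetical transport callbacks. Note
 * that hci_register_dev() above rejects a device lacking open and
 * close:
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus = HCI_UART;
 *	hdev->open = my_open;
 *	hdev->close = my_close;
 *	hdev->send = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
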
3910/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02003911void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912{
Sasha Levin3df92b32012-05-27 22:36:56 +02003913 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02003914
Marcel Holtmannc13854c2010-02-08 15:27:07 +01003915 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916
Johan Hovold94324962012-03-15 14:48:41 +01003917 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3918
Sasha Levin3df92b32012-05-27 22:36:56 +02003919 id = hdev->id;
3920
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003921 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02003923 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924
3925 hci_dev_do_close(hdev);
3926
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05303927 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02003928 kfree_skb(hdev->reassembly[i]);
3929
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02003930 cancel_work_sync(&hdev->power_on);
3931
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003932 if (!test_bit(HCI_INIT, &hdev->flags) &&
Marcel Holtmannfee746b2014-06-29 12:13:05 +02003933 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
3934 !test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003935 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02003936 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003937 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02003938 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02003939
Johan Hedberg2e58ef32011-11-08 20:40:15 +02003940 /* mgmt_index_removed should take care of emptying the
3941 * pending list */
3942 BUG_ON(!list_empty(&hdev->mgmt_pending));
3943
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 hci_notify(hdev, HCI_DEV_UNREG);
3945
Marcel Holtmann611b30f2009-06-08 14:41:38 +02003946 if (hdev->rfkill) {
3947 rfkill_unregister(hdev->rfkill);
3948 rfkill_destroy(hdev->rfkill);
3949 }
3950
Johan Hedberg99780a72014-02-18 10:40:07 +02003951 if (hdev->tfm_aes)
3952 crypto_free_blkcipher(hdev->tfm_aes);
3953
Marcel Holtmannbdc3e0f2013-10-17 17:24:19 -07003954 device_del(&hdev->dev);
Dave Young147e2d52008-03-05 18:45:59 -08003955
Marcel Holtmann0153e2e2013-10-17 17:24:17 -07003956 debugfs_remove_recursive(hdev->debugfs);
3957
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003958 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02003959 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01003960
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003961 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003962 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02003963 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02003964 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03003965 hci_smp_ltks_clear(hdev);
Johan Hedberg970c4e42014-02-18 10:19:33 +02003966 hci_smp_irks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01003967 hci_remote_oob_data_clear(hdev);
Marcel Holtmannd2ab0ac2014-02-27 20:37:30 -08003968 hci_white_list_clear(hdev);
Andre Guedes15819a72014-02-03 13:56:18 -03003969 hci_conn_params_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03003970 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02003971
David Herrmanndc946bd2012-01-07 15:47:24 +01003972 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02003973
3974 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975}
3976EXPORT_SYMBOL(hci_unregister_dev);
3977
3978/* Suspend HCI device */
3979int hci_suspend_dev(struct hci_dev *hdev)
3980{
3981 hci_notify(hdev, HCI_DEV_SUSPEND);
3982 return 0;
3983}
3984EXPORT_SYMBOL(hci_suspend_dev);
3985
3986/* Resume HCI device */
3987int hci_resume_dev(struct hci_dev *hdev)
3988{
3989 hci_notify(hdev, HCI_DEV_RESUME);
3990 return 0;
3991}
3992EXPORT_SYMBOL(hci_resume_dev);
3993
Marcel Holtmann76bca882009-11-18 00:40:39 +01003994/* Receive frame from HCI drivers */
Marcel Holtmanne1a26172013-10-10 16:52:43 -07003995int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
Marcel Holtmann76bca882009-11-18 00:40:39 +01003996{
Marcel Holtmann76bca882009-11-18 00:40:39 +01003997 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003998 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01003999 kfree_skb(skb);
4000 return -ENXIO;
4001 }
4002
Jorrit Schippersd82603c2012-12-27 17:33:02 +01004003 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01004004 bt_cb(skb)->incoming = 1;
4005
4006 /* Time stamp */
4007 __net_timestamp(skb);
4008
Marcel Holtmann76bca882009-11-18 00:40:39 +01004009 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04004010 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004011
Marcel Holtmann76bca882009-11-18 00:40:39 +01004012 return 0;
4013}
4014EXPORT_SYMBOL(hci_recv_frame);
4015
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304016static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004017 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304018{
4019 int len = 0;
4020 int hlen = 0;
4021 int remain = count;
4022 struct sk_buff *skb;
4023 struct bt_skb_cb *scb;
4024
4025 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004026 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304027 return -EILSEQ;
4028
4029 skb = hdev->reassembly[index];
4030
4031 if (!skb) {
4032 switch (type) {
4033 case HCI_ACLDATA_PKT:
4034 len = HCI_MAX_FRAME_SIZE;
4035 hlen = HCI_ACL_HDR_SIZE;
4036 break;
4037 case HCI_EVENT_PKT:
4038 len = HCI_MAX_EVENT_SIZE;
4039 hlen = HCI_EVENT_HDR_SIZE;
4040 break;
4041 case HCI_SCODATA_PKT:
4042 len = HCI_MAX_SCO_SIZE;
4043 hlen = HCI_SCO_HDR_SIZE;
4044 break;
4045 }
4046
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004047 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304048 if (!skb)
4049 return -ENOMEM;
4050
4051 scb = (void *) skb->cb;
4052 scb->expect = hlen;
4053 scb->pkt_type = type;
4054
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304055 hdev->reassembly[index] = skb;
4056 }
4057
4058 while (count) {
4059 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03004060 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304061
4062 memcpy(skb_put(skb, len), data, len);
4063
4064 count -= len;
4065 data += len;
4066 scb->expect -= len;
4067 remain = count;
4068
4069 switch (type) {
4070 case HCI_EVENT_PKT:
4071 if (skb->len == HCI_EVENT_HDR_SIZE) {
4072 struct hci_event_hdr *h = hci_event_hdr(skb);
4073 scb->expect = h->plen;
4074
4075 if (skb_tailroom(skb) < scb->expect) {
4076 kfree_skb(skb);
4077 hdev->reassembly[index] = NULL;
4078 return -ENOMEM;
4079 }
4080 }
4081 break;
4082
4083 case HCI_ACLDATA_PKT:
4084 if (skb->len == HCI_ACL_HDR_SIZE) {
4085 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4086 scb->expect = __le16_to_cpu(h->dlen);
4087
4088 if (skb_tailroom(skb) < scb->expect) {
4089 kfree_skb(skb);
4090 hdev->reassembly[index] = NULL;
4091 return -ENOMEM;
4092 }
4093 }
4094 break;
4095
4096 case HCI_SCODATA_PKT:
4097 if (skb->len == HCI_SCO_HDR_SIZE) {
4098 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4099 scb->expect = h->dlen;
4100
4101 if (skb_tailroom(skb) < scb->expect) {
4102 kfree_skb(skb);
4103 hdev->reassembly[index] = NULL;
4104 return -ENOMEM;
4105 }
4106 }
4107 break;
4108 }
4109
4110 if (scb->expect == 0) {
4111 /* Complete frame */
4112
4113 bt_cb(skb)->pkt_type = type;
Marcel Holtmanne1a26172013-10-10 16:52:43 -07004114 hci_recv_frame(hdev, skb);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05304115
4116 hdev->reassembly[index] = NULL;
4117 return remain;
4118 }
4119 }
4120
4121 return remain;
4122}
4123
Marcel Holtmannef222012007-07-11 06:42:04 +02004124int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4125{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304126 int rem = 0;
4127
Marcel Holtmannef222012007-07-11 06:42:04 +02004128 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4129 return -EILSEQ;
4130
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004131 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004132 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304133 if (rem < 0)
4134 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004135
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304136 data += (count - rem);
4137 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004138 }
Marcel Holtmannef222012007-07-11 06:42:04 +02004139
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05304140 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02004141}
4142EXPORT_SYMBOL(hci_recv_fragment);
4143
Suraj Sumangala99811512010-07-14 13:02:19 +05304144#define STREAM_REASSEMBLY 0
4145
4146int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4147{
4148 int type;
4149 int rem = 0;
4150
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03004151 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05304152 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4153
4154 if (!skb) {
4155 struct { char type; } *pkt;
4156
4157 /* Start of the frame */
4158 pkt = data;
4159 type = pkt->type;
4160
4161 data++;
4162 count--;
4163 } else
4164 type = bt_cb(skb)->pkt_type;
4165
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03004166 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004167 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05304168 if (rem < 0)
4169 return rem;
4170
4171 data += (count - rem);
4172 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00004173 }
Suraj Sumangala99811512010-07-14 13:02:19 +05304174
4175 return rem;
4176}
4177EXPORT_SYMBOL(hci_recv_stream_fragment);
4178
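/* Usage sketch for hci_recv_stream_fragment(); a hypothetical
 * UART-style driver feeds raw bytes as they arrive, and the H:4
 * packet type byte is parsed out of the stream by the helper above:
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, len);
 *	if (ret < 0)
 *		BT_ERR("Frame reassembly failed (%d)", ret);
 */
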
Linus Torvalds1da177e2005-04-16 15:20:36 -07004179/* ---- Interface to upper protocols ---- */
4180
Linus Torvalds1da177e2005-04-16 15:20:36 -07004181int hci_register_cb(struct hci_cb *cb)
4182{
4183 BT_DBG("%p name %s", cb, cb->name);
4184
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004185 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004187 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188
4189 return 0;
4190}
4191EXPORT_SYMBOL(hci_register_cb);
4192
4193int hci_unregister_cb(struct hci_cb *cb)
4194{
4195 BT_DBG("%p name %s", cb, cb->name);
4196
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004197 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02004199 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
4201 return 0;
4202}
4203EXPORT_SYMBOL(hci_unregister_cb);
4204
Marcel Holtmann51086992013-10-10 14:54:19 -07004205static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206{
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004207 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004209 /* Time stamp */
4210 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211
Marcel Holtmanncd82e612012-02-20 20:34:38 +01004212 /* Send copy to monitor */
4213 hci_send_to_monitor(hdev, skb);
4214
4215 if (atomic_read(&hdev->promisc)) {
4216 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01004217 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218 }
4219
4220 /* Get rid of skb owner, prior to sending to the driver. */
4221 skb_orphan(skb);
4222
Marcel Holtmann7bd8f092013-10-11 06:19:18 -07004223 if (hdev->send(hdev, skb) < 0)
Marcel Holtmann51086992013-10-10 14:54:19 -07004224 BT_ERR("%s sending frame failed", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225}
4226
Johan Hedberg3119ae92013-03-05 20:37:44 +02004227void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4228{
4229 skb_queue_head_init(&req->cmd_q);
4230 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03004231 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004232}
4233
4234int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4235{
4236 struct hci_dev *hdev = req->hdev;
4237 struct sk_buff *skb;
4238 unsigned long flags;
4239
4240 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4241
Andre Guedes5d73e032013-03-08 11:20:16 -03004242	/* If an error occurred during request building, remove all HCI
4243 * commands queued on the HCI request queue.
4244 */
4245 if (req->err) {
4246 skb_queue_purge(&req->cmd_q);
4247 return req->err;
4248 }
4249
Johan Hedberg3119ae92013-03-05 20:37:44 +02004250 /* Do not allow empty requests */
4251 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03004252 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02004253
4254 skb = skb_peek_tail(&req->cmd_q);
4255 bt_cb(skb)->req.complete = complete;
4256
4257 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4258 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4259 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4260
4261 queue_work(hdev->workqueue, &hdev->cmd_work);
4262
4263 return 0;
4264}
4265
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004266static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004267 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268{
4269 int len = HCI_COMMAND_HDR_SIZE + plen;
4270 struct hci_command_hdr *hdr;
4271 struct sk_buff *skb;
4272
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004274 if (!skb)
4275 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276
4277 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004278 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004279 hdr->plen = plen;
4280
4281 if (plen)
4282 memcpy(skb_put(skb, plen), param, plen);
4283
4284 BT_DBG("skb len %d", skb->len);
4285
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004286 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004287
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004288 return skb;
4289}
4290
4291/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004292int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4293 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02004294{
4295 struct sk_buff *skb;
4296
4297 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4298
4299 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4300 if (!skb) {
4301 BT_ERR("%s no memory for command", hdev->name);
4302 return -ENOMEM;
4303 }
4304
Johan Hedberg11714b32013-03-05 20:37:47 +02004305	/* Stand-alone HCI commands must be flagged as
4306 * single-command requests.
4307 */
4308 bt_cb(skb)->req.start = true;
4309
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02004311 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312
4313 return 0;
4314}
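
/* Example (editorial sketch, not part of the original file): sending a
 * single stand-alone command, here Write Scan Enable with both page
 * scan and inquiry scan turned on:
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */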
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315
Johan Hedberg71c76a12013-03-05 20:37:46 +02004316/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004317void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4318 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02004319{
4320 struct hci_dev *hdev = req->hdev;
4321 struct sk_buff *skb;
4322
4323 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4324
Andre Guedes34739c12013-03-08 11:20:18 -03004325	/* If an error occurred during request building, there is no point in
4326 * queueing the HCI command. We can simply return.
4327 */
4328 if (req->err)
4329 return;
4330
Johan Hedberg71c76a12013-03-05 20:37:46 +02004331 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4332 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03004333 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4334 hdev->name, opcode);
4335 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03004336 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02004337 }
4338
4339 if (skb_queue_empty(&req->cmd_q))
4340 bt_cb(skb)->req.start = true;
4341
Johan Hedberg02350a72013-04-03 21:50:29 +03004342 bt_cb(skb)->req.event = event;
4343
Johan Hedberg71c76a12013-03-05 20:37:46 +02004344 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02004345}
4346
Johan Hedberg07dc93d2013-04-19 10:14:51 +03004347void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4348 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03004349{
4350 hci_req_add_ev(req, opcode, plen, param, 0);
4351}
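
/* Example (editorial sketch, not part of the original file): a typical
 * asynchronous request queues several commands and submits them as one
 * unit; the complete callback name is hypothetical. hci_req_run()
 * returns -ENODATA for an empty request and the recorded req->err if
 * any hci_req_add() call failed, so one error check after building the
 * request is enough.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	hci_req_add(&req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
 *	err = hci_req_run(&req, my_request_complete);
 */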
4352
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004354void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355{
4356 struct hci_command_hdr *hdr;
4357
4358 if (!hdev->sent_cmd)
4359 return NULL;
4360
4361 hdr = (void *) hdev->sent_cmd->data;
4362
Marcel Holtmanna9de9242007-10-20 13:33:56 +02004363 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 return NULL;
4365
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004366 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367
4368 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4369}
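
/* Example (editorial sketch, not part of the original file): event
 * handlers use this to recover the parameters of the command that a
 * Command Complete event refers to, as the Write Scan Enable handler
 * does:
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	__u8 param;
 *
 *	if (!sent)
 *		return;
 *
 *	param = *((__u8 *) sent);
 */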
4370
4371/* Send ACL data */
4372static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4373{
4374 struct hci_acl_hdr *hdr;
4375 int len = skb->len;
4376
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004377 skb_push(skb, HCI_ACL_HDR_SIZE);
4378 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004379 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004380 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4381 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004382}
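
/* Worked example (editorial note, not part of the original file): the
 * 12-bit connection handle and the packet boundary/broadcast flags
 * share the 16-bit handle field. For handle 0x0042 with ACL_START:
 *
 *	hci_handle_pack(0x0042, ACL_START)
 *		= (0x0042 & 0x0fff) | (0x02 << 12)
 *		= 0x2042
 */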
4383
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004384static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004385 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004386{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004387 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004388 struct hci_dev *hdev = conn->hdev;
4389 struct sk_buff *list;
4390
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004391 skb->len = skb_headlen(skb);
4392 skb->data_len = 0;
4393
4394 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03004395
4396 switch (hdev->dev_type) {
4397 case HCI_BREDR:
4398 hci_add_acl_hdr(skb, conn->handle, flags);
4399 break;
4400 case HCI_AMP:
4401 hci_add_acl_hdr(skb, chan->handle, flags);
4402 break;
4403 default:
4404 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4405 return;
4406 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03004407
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02004408 list = skb_shinfo(skb)->frag_list;
4409 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004410		/* Non-fragmented */
4411 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4412
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004413 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 } else {
4415 /* Fragmented */
4416 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4417
4418 skb_shinfo(skb)->frag_list = NULL;
4419
4420 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004421 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004422
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004423 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004424
4425 flags &= ~ACL_START;
4426 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004427 do {
4428 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004429
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004430 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02004431 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004432
4433 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4434
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004435 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004436 } while (list);
4437
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02004438 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004439 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004440}
4441
4442void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4443{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004444 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004445
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004446 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004447
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03004448 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004449
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004450 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004451}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452
4453/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03004454void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455{
4456 struct hci_dev *hdev = conn->hdev;
4457 struct hci_sco_hdr hdr;
4458
4459 BT_DBG("%s len %d", hdev->name, skb->len);
4460
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004461 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004462 hdr.dlen = skb->len;
4463
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03004464 skb_push(skb, HCI_SCO_HDR_SIZE);
4465 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07004466 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004467
Marcel Holtmann0d48d932005-08-09 20:30:28 -07004468 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01004469
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004471 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004473
4474/* ---- HCI TX task (outgoing data) ---- */
4475
4476/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004477static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4478 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479{
4480 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004481 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02004482 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004484	/* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485	 * added and removed with the TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004486
4487 rcu_read_lock();
4488
4489 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02004490 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07004491 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02004492
4493 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4494 continue;
4495
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 num++;
4497
4498 if (c->sent < min) {
4499 min = c->sent;
4500 conn = c;
4501 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004502
4503 if (hci_conn_num(hdev, type) == num)
4504 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004505 }
4506
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004507 rcu_read_unlock();
4508
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004510 int cnt, q;
4511
4512 switch (conn->type) {
4513 case ACL_LINK:
4514 cnt = hdev->acl_cnt;
4515 break;
4516 case SCO_LINK:
4517 case ESCO_LINK:
4518 cnt = hdev->sco_cnt;
4519 break;
4520 case LE_LINK:
4521 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4522 break;
4523 default:
4524 cnt = 0;
4525 BT_ERR("Unknown link type");
4526 }
4527
4528 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529 *quote = q ? q : 1;
4530 } else
4531 *quote = 0;
4532
4533 BT_DBG("conn %p quote %d", conn, *quote);
4534 return conn;
4535}
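
/* Worked example (editorial note, not part of the original file): with
 * hdev->acl_cnt == 8 free ACL slots and three ACL connections that all
 * have queued data, the least used connection wins and is given a
 * quote of 8 / 3 = 2 packets; a quotient of 0 is rounded up to 1 so
 * the winner can always make progress.
 */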
4536
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004537static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004538{
4539 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02004540 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004541
Ville Tervobae1f5d92011-02-10 22:38:53 -03004542 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004544 rcu_read_lock();
4545
Linus Torvalds1da177e2005-04-16 15:20:36 -07004546 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004547 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03004548 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03004549 BT_ERR("%s killing stalled connection %pMR",
4550 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03004551 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004552 }
4553 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004554
4555 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004556}
4557
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004558static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4559 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004560{
4561 struct hci_conn_hash *h = &hdev->conn_hash;
4562 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02004563 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004564 struct hci_conn *conn;
4565 int cnt, q, conn_num = 0;
4566
4567 BT_DBG("%s", hdev->name);
4568
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004569 rcu_read_lock();
4570
4571 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004572 struct hci_chan *tmp;
4573
4574 if (conn->type != type)
4575 continue;
4576
4577 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4578 continue;
4579
4580 conn_num++;
4581
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004582 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004583 struct sk_buff *skb;
4584
4585 if (skb_queue_empty(&tmp->data_q))
4586 continue;
4587
4588 skb = skb_peek(&tmp->data_q);
4589 if (skb->priority < cur_prio)
4590 continue;
4591
4592 if (skb->priority > cur_prio) {
4593 num = 0;
4594 min = ~0;
4595 cur_prio = skb->priority;
4596 }
4597
4598 num++;
4599
4600 if (conn->sent < min) {
4601 min = conn->sent;
4602 chan = tmp;
4603 }
4604 }
4605
4606 if (hci_conn_num(hdev, type) == conn_num)
4607 break;
4608 }
4609
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004610 rcu_read_unlock();
4611
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004612 if (!chan)
4613 return NULL;
4614
4615 switch (chan->conn->type) {
4616 case ACL_LINK:
4617 cnt = hdev->acl_cnt;
4618 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004619 case AMP_LINK:
4620 cnt = hdev->block_cnt;
4621 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004622 case SCO_LINK:
4623 case ESCO_LINK:
4624 cnt = hdev->sco_cnt;
4625 break;
4626 case LE_LINK:
4627 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4628 break;
4629 default:
4630 cnt = 0;
4631 BT_ERR("Unknown link type");
4632 }
4633
4634 q = cnt / num;
4635 *quote = q ? q : 1;
4636 BT_DBG("chan %p quote %d", chan, *quote);
4637 return chan;
4638}
4639
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004640static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4641{
4642 struct hci_conn_hash *h = &hdev->conn_hash;
4643 struct hci_conn *conn;
4644 int num = 0;
4645
4646 BT_DBG("%s", hdev->name);
4647
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004648 rcu_read_lock();
4649
4650 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004651 struct hci_chan *chan;
4652
4653 if (conn->type != type)
4654 continue;
4655
4656 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4657 continue;
4658
4659 num++;
4660
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02004661 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004662 struct sk_buff *skb;
4663
4664 if (chan->sent) {
4665 chan->sent = 0;
4666 continue;
4667 }
4668
4669 if (skb_queue_empty(&chan->data_q))
4670 continue;
4671
4672 skb = skb_peek(&chan->data_q);
4673 if (skb->priority >= HCI_PRIO_MAX - 1)
4674 continue;
4675
4676 skb->priority = HCI_PRIO_MAX - 1;
4677
4678 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004679 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004680 }
4681
4682 if (hci_conn_num(hdev, type) == num)
4683 break;
4684 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02004685
4686 rcu_read_unlock();
4687
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004688}
4689
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004690static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4691{
4692 /* Calculate count of blocks used by this packet */
4693 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4694}
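
/* Worked example (editorial note, not part of the original file): with
 * hdev->block_len == 256, a 1028-byte ACL packet (4-byte ACL header
 * plus 1024 bytes of payload) occupies
 * DIV_ROUND_UP(1028 - HCI_ACL_HDR_SIZE, 256) = 4 data blocks.
 */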
4695
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004696static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697{
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004698 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699		/* ACL tx timeout must be longer than the maximum
4700 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004701 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03004702 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004703 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004704 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004705}
Linus Torvalds1da177e2005-04-16 15:20:36 -07004706
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004707static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004708{
4709 unsigned int cnt = hdev->acl_cnt;
4710 struct hci_chan *chan;
4711 struct sk_buff *skb;
4712 int quote;
4713
4714 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004715
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004716 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004717 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004718 u32 priority = (skb_peek(&chan->data_q))->priority;
4719 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004720 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004721 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004722
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004723 /* Stop if priority has changed */
4724 if (skb->priority < priority)
4725 break;
4726
4727 skb = skb_dequeue(&chan->data_q);
4728
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004729 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03004730 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004731
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004732 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004733 hdev->acl_last_tx = jiffies;
4734
4735 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004736 chan->sent++;
4737 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 }
4739 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004740
4741 if (cnt != hdev->acl_cnt)
4742 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743}
4744
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004745static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004746{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004747 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004748 struct hci_chan *chan;
4749 struct sk_buff *skb;
4750 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004751 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004752
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02004753 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004754
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004755 BT_DBG("%s", hdev->name);
4756
4757 if (hdev->dev_type == HCI_AMP)
4758 type = AMP_LINK;
4759 else
4760 type = ACL_LINK;
4761
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004762 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004763 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004764 u32 priority = (skb_peek(&chan->data_q))->priority;
4765 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4766 int blocks;
4767
4768 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004769 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004770
4771 /* Stop if priority has changed */
4772 if (skb->priority < priority)
4773 break;
4774
4775 skb = skb_dequeue(&chan->data_q);
4776
4777 blocks = __get_blocks(hdev, skb);
4778 if (blocks > hdev->block_cnt)
4779 return;
4780
4781 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004782 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004783
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004784 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004785 hdev->acl_last_tx = jiffies;
4786
4787 hdev->block_cnt -= blocks;
4788 quote -= blocks;
4789
4790 chan->sent += blocks;
4791 chan->conn->sent += blocks;
4792 }
4793 }
4794
4795 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004796 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004797}
4798
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004799static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004800{
4801 BT_DBG("%s", hdev->name);
4802
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03004803 /* No ACL link over BR/EDR controller */
4804 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4805 return;
4806
4807 /* No AMP link over AMP controller */
4808 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02004809 return;
4810
4811 switch (hdev->flow_ctl_mode) {
4812 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4813 hci_sched_acl_pkt(hdev);
4814 break;
4815
4816 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4817 hci_sched_acl_blk(hdev);
4818 break;
4819 }
4820}
4821
Linus Torvalds1da177e2005-04-16 15:20:36 -07004822/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004823static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004824{
4825 struct hci_conn *conn;
4826 struct sk_buff *skb;
4827 int quote;
4828
4829 BT_DBG("%s", hdev->name);
4830
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004831 if (!hci_conn_num(hdev, SCO_LINK))
4832 return;
4833
Linus Torvalds1da177e2005-04-16 15:20:36 -07004834 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4835 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4836 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004837 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004838
4839 conn->sent++;
4840 if (conn->sent == ~0)
4841 conn->sent = 0;
4842 }
4843 }
4844}
4845
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004846static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004847{
4848 struct hci_conn *conn;
4849 struct sk_buff *skb;
4850 int quote;
4851
4852 BT_DBG("%s", hdev->name);
4853
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004854 if (!hci_conn_num(hdev, ESCO_LINK))
4855 return;
4856
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03004857 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4858 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004859 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4860 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004861 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02004862
4863 conn->sent++;
4864 if (conn->sent == ~0)
4865 conn->sent = 0;
4866 }
4867 }
4868}
4869
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004870static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004871{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004872 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004873 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004874 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004875
4876 BT_DBG("%s", hdev->name);
4877
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03004878 if (!hci_conn_num(hdev, LE_LINK))
4879 return;
4880
Marcel Holtmannfee746b2014-06-29 12:13:05 +02004881 if (!test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004882		/* LE tx timeout must be longer than the maximum
4883 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03004884 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004885 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03004886 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004887 }
4888
4889 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004890 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004891 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004892 u32 priority = (skb_peek(&chan->data_q))->priority;
4893 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004894 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004895 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004896
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02004897 /* Stop if priority has changed */
4898 if (skb->priority < priority)
4899 break;
4900
4901 skb = skb_dequeue(&chan->data_q);
4902
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004903 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004904 hdev->le_last_tx = jiffies;
4905
4906 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004907 chan->sent++;
4908 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004909 }
4910 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02004911
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004912 if (hdev->le_pkts)
4913 hdev->le_cnt = cnt;
4914 else
4915 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02004916
4917 if (cnt != tmp)
4918 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004919}
4920
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004921static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02004923 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 struct sk_buff *skb;
4925
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004926 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004927 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004928
Marcel Holtmann52de5992013-09-03 18:08:38 -07004929 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4930 /* Schedule queues and send stuff to HCI driver */
4931 hci_sched_acl(hdev);
4932 hci_sched_sco(hdev);
4933 hci_sched_esco(hdev);
4934 hci_sched_le(hdev);
4935 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03004936
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937 /* Send next queued raw (unknown type) packet */
4938 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07004939 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004940}
4941
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004942/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07004943
4944/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004945static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004946{
4947 struct hci_acl_hdr *hdr = (void *) skb->data;
4948 struct hci_conn *conn;
4949 __u16 handle, flags;
4950
4951 skb_pull(skb, HCI_ACL_HDR_SIZE);
4952
4953 handle = __le16_to_cpu(hdr->handle);
4954 flags = hci_flags(handle);
4955 handle = hci_handle(handle);
4956
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004957 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004958 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004959
4960 hdev->stat.acl_rx++;
4961
4962 hci_dev_lock(hdev);
4963 conn = hci_conn_hash_lookup_handle(hdev, handle);
4964 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004965
Linus Torvalds1da177e2005-04-16 15:20:36 -07004966 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08004967 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02004968
Linus Torvalds1da177e2005-04-16 15:20:36 -07004969 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02004970 l2cap_recv_acldata(conn, skb, flags);
4971 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004973 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03004974 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004975 }
4976
4977 kfree_skb(skb);
4978}
4979
4980/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03004981static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982{
4983 struct hci_sco_hdr *hdr = (void *) skb->data;
4984 struct hci_conn *conn;
4985 __u16 handle;
4986
4987 skb_pull(skb, HCI_SCO_HDR_SIZE);
4988
4989 handle = __le16_to_cpu(hdr->handle);
4990
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03004991 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004992
4993 hdev->stat.sco_rx++;
4994
4995 hci_dev_lock(hdev);
4996 conn = hci_conn_hash_lookup_handle(hdev, handle);
4997 hci_dev_unlock(hdev);
4998
4999 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005000 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02005001 sco_recv_scodata(conn, skb);
5002 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005003 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09005004 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03005005 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 }
5007
5008 kfree_skb(skb);
5009}
5010
Johan Hedberg9238f362013-03-05 20:37:48 +02005011static bool hci_req_is_complete(struct hci_dev *hdev)
5012{
5013 struct sk_buff *skb;
5014
5015 skb = skb_peek(&hdev->cmd_q);
5016 if (!skb)
5017 return true;
5018
5019 return bt_cb(skb)->req.start;
5020}
5021
Johan Hedberg42c6b122013-03-05 20:37:49 +02005022static void hci_resend_last(struct hci_dev *hdev)
5023{
5024 struct hci_command_hdr *sent;
5025 struct sk_buff *skb;
5026 u16 opcode;
5027
5028 if (!hdev->sent_cmd)
5029 return;
5030
5031 sent = (void *) hdev->sent_cmd->data;
5032 opcode = __le16_to_cpu(sent->opcode);
5033 if (opcode == HCI_OP_RESET)
5034 return;
5035
5036 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5037 if (!skb)
5038 return;
5039
5040 skb_queue_head(&hdev->cmd_q, skb);
5041 queue_work(hdev->workqueue, &hdev->cmd_work);
5042}
5043
Johan Hedberg9238f362013-03-05 20:37:48 +02005044void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5045{
5046 hci_req_complete_t req_complete = NULL;
5047 struct sk_buff *skb;
5048 unsigned long flags;
5049
5050 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5051
Johan Hedberg42c6b122013-03-05 20:37:49 +02005052 /* If the completed command doesn't match the last one that was
5053	 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02005054 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02005055 if (!hci_sent_cmd_data(hdev, opcode)) {
5056 /* Some CSR based controllers generate a spontaneous
5057 * reset complete event during init and any pending
5058 * command will never be completed. In such a case we
5059 * need to resend whatever was the last sent
5060 * command.
5061 */
5062 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5063 hci_resend_last(hdev);
5064
Johan Hedberg9238f362013-03-05 20:37:48 +02005065 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02005066 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005067
5068 /* If the command succeeded and there's still more commands in
5069 * this request the request is not yet complete.
5070 */
5071 if (!status && !hci_req_is_complete(hdev))
5072 return;
5073
5074	/* If this was the last command in a request, the complete
5075 * callback would be found in hdev->sent_cmd instead of the
5076 * command queue (hdev->cmd_q).
5077 */
5078 if (hdev->sent_cmd) {
5079 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005080
5081 if (req_complete) {
5082 /* We must set the complete callback to NULL to
5083 * avoid calling the callback more than once if
5084 * this function gets called again.
5085 */
5086 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5087
Johan Hedberg9238f362013-03-05 20:37:48 +02005088 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05005089 }
Johan Hedberg9238f362013-03-05 20:37:48 +02005090 }
5091
5092 /* Remove all pending commands belonging to this request */
5093 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5094 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5095 if (bt_cb(skb)->req.start) {
5096 __skb_queue_head(&hdev->cmd_q, skb);
5097 break;
5098 }
5099
5100 req_complete = bt_cb(skb)->req.complete;
5101 kfree_skb(skb);
5102 }
5103 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5104
5105call_complete:
5106 if (req_complete)
5107 req_complete(hdev, status);
5108}
5109
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005110static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005111{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005112 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005113 struct sk_buff *skb;
5114
5115 BT_DBG("%s", hdev->name);
5116
Linus Torvalds1da177e2005-04-16 15:20:36 -07005117 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01005118 /* Send copy to monitor */
5119 hci_send_to_monitor(hdev, skb);
5120
Linus Torvalds1da177e2005-04-16 15:20:36 -07005121 if (atomic_read(&hdev->promisc)) {
5122 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01005123 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005124 }
5125
Marcel Holtmannfee746b2014-06-29 12:13:05 +02005126 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005127 kfree_skb(skb);
5128 continue;
5129 }
5130
5131 if (test_bit(HCI_INIT, &hdev->flags)) {
5132 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005133 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005134 case HCI_ACLDATA_PKT:
5135 case HCI_SCODATA_PKT:
5136 kfree_skb(skb);
5137 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07005138 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139 }
5140
5141 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07005142 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04005144 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145 hci_event_packet(hdev, skb);
5146 break;
5147
5148 case HCI_ACLDATA_PKT:
5149 BT_DBG("%s ACL data packet", hdev->name);
5150 hci_acldata_packet(hdev, skb);
5151 break;
5152
5153 case HCI_SCODATA_PKT:
5154 BT_DBG("%s SCO data packet", hdev->name);
5155 hci_scodata_packet(hdev, skb);
5156 break;
5157
5158 default:
5159 kfree_skb(skb);
5160 break;
5161 }
5162 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005163}
5164
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005165static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005166{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005167 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005168 struct sk_buff *skb;
5169
Andrei Emeltchenko21047862012-07-10 15:27:47 +03005170 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5171 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07005172
Linus Torvalds1da177e2005-04-16 15:20:36 -07005173 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02005174 if (atomic_read(&hdev->cmd_cnt)) {
5175 skb = skb_dequeue(&hdev->cmd_q);
5176 if (!skb)
5177 return;
5178
Wei Yongjun7585b972009-02-25 18:29:52 +08005179 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005180
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07005181 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02005182 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005183 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07005184 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005185 if (test_bit(HCI_RESET, &hdev->flags))
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005186 cancel_delayed_work(&hdev->cmd_timer);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02005187 else
Marcel Holtmann65cc2b42014-06-16 12:30:56 +02005188 schedule_delayed_work(&hdev->cmd_timer,
5189 HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005190 } else {
5191 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02005192 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005193 }
5194 }
5195}
Andre Guedesb1efcc22014-02-26 20:21:40 -03005196
5197void hci_req_add_le_scan_disable(struct hci_request *req)
5198{
5199 struct hci_cp_le_set_scan_enable cp;
5200
5201 memset(&cp, 0, sizeof(cp));
5202 cp.enable = LE_SCAN_DISABLE;
5203 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5204}
Andre Guedesa4790db2014-02-26 20:21:47 -03005205
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005206void hci_req_add_le_passive_scan(struct hci_request *req)
5207{
5208 struct hci_cp_le_set_scan_param param_cp;
5209 struct hci_cp_le_set_scan_enable enable_cp;
5210 struct hci_dev *hdev = req->hdev;
5211 u8 own_addr_type;
5212
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005213	/* Set require_privacy to false since no SCAN_REQ are sent
5214 * during passive scanning. Not using an unresolvable address
5215 * here is important so that peer devices using direct
5216 * advertising with our address will be correctly reported
5217 * by the controller.
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005218 */
Marcel Holtmann6ab535a2014-06-29 12:20:15 +02005219 if (hci_update_random_address(req, false, &own_addr_type))
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005220 return;
5221
5222 memset(&param_cp, 0, sizeof(param_cp));
5223 param_cp.type = LE_SCAN_PASSIVE;
5224 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5225 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5226 param_cp.own_address_type = own_addr_type;
5227 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5228 &param_cp);
5229
5230 memset(&enable_cp, 0, sizeof(enable_cp));
5231 enable_cp.enable = LE_SCAN_ENABLE;
Andre Guedes4340a122014-03-10 18:26:24 -03005232 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005233 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5234 &enable_cp);
5235}
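
/* Example (editorial sketch, not part of the original file):
 * hci_update_background_scan() below uses this helper as part of a
 * larger request; a minimal stand-alone use, with hdev->lock held,
 * would be:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_passive_scan(&req);
 *	hci_req_run(&req, NULL);
 */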
5236
Andre Guedesa4790db2014-02-26 20:21:47 -03005237static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5238{
5239 if (status)
5240 BT_DBG("HCI request failed to update background scanning: "
5241 "status 0x%2.2x", status);
5242}
5243
5244/* This function controls the background scanning based on hdev->pend_le_conns
5245 * list. If there are pending LE connections, we start the background scanning,
5246 * otherwise we stop it.
5247 *
5248 * This function requires the caller to hold hdev->lock.
5249 */
5250void hci_update_background_scan(struct hci_dev *hdev)
5251{
Andre Guedesa4790db2014-02-26 20:21:47 -03005252 struct hci_request req;
5253 struct hci_conn *conn;
5254 int err;
5255
Marcel Holtmann1c1697c2014-06-29 13:41:51 +02005256 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5257 return;
5258
Andre Guedesa4790db2014-02-26 20:21:47 -03005259 hci_req_init(&req, hdev);
5260
5261 if (list_empty(&hdev->pend_le_conns)) {
5262		/* If there are no pending LE connections, we should stop
5263 * the background scanning.
5264 */
5265
5266 /* If controller is not scanning we are done. */
5267 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5268 return;
5269
5270 hci_req_add_le_scan_disable(&req);
5271
5272 BT_DBG("%s stopping background scanning", hdev->name);
5273 } else {
Andre Guedesa4790db2014-02-26 20:21:47 -03005274 /* If there is at least one pending LE connection, we should
5275 * keep the background scan running.
5276 */
5277
Andre Guedesa4790db2014-02-26 20:21:47 -03005278 /* If controller is connecting, we should not start scanning
5279 * since some controllers are not able to scan and connect at
5280 * the same time.
5281 */
5282 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5283 if (conn)
5284 return;
5285
Andre Guedes4340a122014-03-10 18:26:24 -03005286 /* If controller is currently scanning, we stop it to ensure we
5287 * don't miss any advertising (due to duplicates filter).
5288 */
5289 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5290 hci_req_add_le_scan_disable(&req);
5291
Andre Guedes8ef30fd2014-02-26 20:21:55 -03005292 hci_req_add_le_passive_scan(&req);
Andre Guedesa4790db2014-02-26 20:21:47 -03005293
5294 BT_DBG("%s starting background scanning", hdev->name);
5295 }
5296
5297 err = hci_req_run(&req, update_background_scan_complete);
5298 if (err)
5299 BT_ERR("Failed to run HCI request: err %d", err);
5300}